Semantic Segmentation

Import Modules

In [1]:
# IMPORT MODULES
# Import Import Numpy, TensorFlow, Scipy, Keras
import sys
import time
import os
import numpy as np 
import pandas as pd 
from glob import glob
import cv2
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
import pickle
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.signal import find_peaks_cwt
import random
from sklearn.utils import shuffle
import csv
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from tensorflow.examples.tutorials.mnist import input_data
import keras
from keras.datasets import cifar10
from keras.models import Sequential,model_from_json
from keras.layers import Dense,Dropout,Activation,Flatten
from keras.layers import Convolution2D,MaxPooling2D
from keras.layers import Flatten,Lambda,ELU
from keras.optimizers import SGD,Adam,RMSprop
from keras.layers.convolutional import Conv2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.initializers import glorot_uniform
from sklearn.cross_validation import train_test_split
import scipy.misc
from matplotlib.pyplot import imshow
from IPython.display import SVG
import json
from keras.models import Sequential, model_from_json
from collections import namedtuple
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.layers import UpSampling2D
import time
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
%matplotlib inline
print('Import Modules')
Import Modules
Using TensorFlow backend.

Load Data

In [2]:
# LOAD DATA
# Change Directory
os.chdir('..')
In [3]:
# LOAD DATA
# Load Training Data Directories
train_image_dir = "Data/Images/Train/"
train_label_dir = "Data/Ground-Truth/Train/"
train_dirs = ["jena/", "zurich/", "weimar/", "ulm/", "tubingen/", "stuttgart/",
            "strasbourg/", "monchengladbach/", "krefeld/", "hanover/",
            "hamburg/", "erfurt/", "dusseldorf/", "darmstadt/", "cologne/",
            "bremen/", "bochum/", "aachen/"]
In [4]:
# LOAD DATA
# Define a Function to Convert Label Image to Binary Image
def create_binary_labels(img):
    # Create Binary Image
    binary_img = np.zeros((img.shape[0], img.shape[1]))
    # Create For Loops for Check ground Pixels
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            if img[i,j][0] == 128 and img[i,j][1] == 64 and img[i,j][2] == 128:
                binary_img[i,j] = 255
            else:
                binary_img[i,j] = 0
    return binary_img
In [33]:
# LOAD DATA
# Define a Function to Perform Gamma Correction
def adjust_gamma(image, gamma=1.0):
    invGamma = 1.0/gamma
    table = np.array([((i/255.0) ** invGamma) * 255
      for i in np.arange(0, 256)]).astype("uint8")

    return cv2.LUT(image, table)

# Initialize Array
gamma_array = np.array([0.5, 0.65, 0.85, 1.0, 1.45, 1.75, 2.0])
In [6]:
# LOAD DATA
#Create Empty Array for Training Images
x_train = []
y_train = []

# Create a For Loop to Import Training Data
for dir_step, dir in enumerate(train_dirs):
    # Load Current Directory
    current_dir = train_image_dir + dir
    
    # Load Filenames in Current Directory
    filenames = os.listdir(current_dir)
    
    # Create For Loop for Import Images from Current Directory
    for step, filename in enumerate(filenames):
        # Print Current Loading Status
        if step % 10 == 0:
            print("Training Data Directory: %d/%d, Step: %d/%d" % (dir_step, len(train_dirs)-1,step, \
                                                                   len(filenames)-1))
        
        # Check Image ID
        image_id = filename.split("_left")[0]

        # Read the Image
        image_path = current_dir + filename
        image = cv2.imread(image_path)
        
        # Randomly Select Gamma
        gamma = random.choice(gamma_array)
    
        # Perform Gamma Correction
        image = adjust_gamma(image, gamma = gamma)

        # Resize the Image
        image_small = cv2.resize(image, (144,144), interpolation=cv2.INTER_NEAREST)
        
        # Flatten Image and Append 
        image_flat = image_small.flatten().tolist()
        x_train += image_flat

        # Read Label Image
        label_image_path = train_label_dir + dir + image_id + "_gtFine_color.png"
        label_image = cv2.imread(label_image_path)
        
        # Resize the Label Image
        label_image_small = cv2.resize(label_image, (144,144), interpolation=cv2.INTER_NEAREST)

        # Convert to Binary Image
        binary_label_image = create_binary_labels(label_image_small)
        
        # Flatten Image and Append 
        label_image_flat = binary_label_image.flatten().tolist()
        y_train += label_image_flat
Training Data Directory: 0/17, Step: 0/118
Training Data Directory: 0/17, Step: 10/118
Training Data Directory: 0/17, Step: 20/118
Training Data Directory: 0/17, Step: 30/118
Training Data Directory: 0/17, Step: 40/118
Training Data Directory: 0/17, Step: 50/118
Training Data Directory: 0/17, Step: 60/118
Training Data Directory: 0/17, Step: 70/118
Training Data Directory: 0/17, Step: 80/118
Training Data Directory: 0/17, Step: 90/118
Training Data Directory: 0/17, Step: 100/118
Training Data Directory: 0/17, Step: 110/118
Training Data Directory: 1/17, Step: 0/121
Training Data Directory: 1/17, Step: 10/121
Training Data Directory: 1/17, Step: 20/121
Training Data Directory: 1/17, Step: 30/121
Training Data Directory: 1/17, Step: 40/121
Training Data Directory: 1/17, Step: 50/121
Training Data Directory: 1/17, Step: 60/121
Training Data Directory: 1/17, Step: 70/121
Training Data Directory: 1/17, Step: 80/121
Training Data Directory: 1/17, Step: 90/121
Training Data Directory: 1/17, Step: 100/121
Training Data Directory: 1/17, Step: 110/121
Training Data Directory: 1/17, Step: 120/121
Training Data Directory: 2/17, Step: 0/141
Training Data Directory: 2/17, Step: 10/141
Training Data Directory: 2/17, Step: 20/141
Training Data Directory: 2/17, Step: 30/141
Training Data Directory: 2/17, Step: 40/141
Training Data Directory: 2/17, Step: 50/141
Training Data Directory: 2/17, Step: 60/141
Training Data Directory: 2/17, Step: 70/141
Training Data Directory: 2/17, Step: 80/141
Training Data Directory: 2/17, Step: 90/141
Training Data Directory: 2/17, Step: 100/141
Training Data Directory: 2/17, Step: 110/141
Training Data Directory: 2/17, Step: 120/141
Training Data Directory: 2/17, Step: 130/141
Training Data Directory: 2/17, Step: 140/141
Training Data Directory: 3/17, Step: 0/94
Training Data Directory: 3/17, Step: 10/94
Training Data Directory: 3/17, Step: 20/94
Training Data Directory: 3/17, Step: 30/94
Training Data Directory: 3/17, Step: 40/94
Training Data Directory: 3/17, Step: 50/94
Training Data Directory: 3/17, Step: 60/94
Training Data Directory: 3/17, Step: 70/94
Training Data Directory: 3/17, Step: 80/94
Training Data Directory: 3/17, Step: 90/94
Training Data Directory: 4/17, Step: 0/143
Training Data Directory: 4/17, Step: 10/143
Training Data Directory: 4/17, Step: 20/143
Training Data Directory: 4/17, Step: 30/143
Training Data Directory: 4/17, Step: 40/143
Training Data Directory: 4/17, Step: 50/143
Training Data Directory: 4/17, Step: 60/143
Training Data Directory: 4/17, Step: 70/143
Training Data Directory: 4/17, Step: 80/143
Training Data Directory: 4/17, Step: 90/143
Training Data Directory: 4/17, Step: 100/143
Training Data Directory: 4/17, Step: 110/143
Training Data Directory: 4/17, Step: 120/143
Training Data Directory: 4/17, Step: 130/143
Training Data Directory: 4/17, Step: 140/143
Training Data Directory: 5/17, Step: 0/195
Training Data Directory: 5/17, Step: 10/195
Training Data Directory: 5/17, Step: 20/195
Training Data Directory: 5/17, Step: 30/195
Training Data Directory: 5/17, Step: 40/195
Training Data Directory: 5/17, Step: 50/195
Training Data Directory: 5/17, Step: 60/195
Training Data Directory: 5/17, Step: 70/195
Training Data Directory: 5/17, Step: 80/195
Training Data Directory: 5/17, Step: 90/195
Training Data Directory: 5/17, Step: 100/195
Training Data Directory: 5/17, Step: 110/195
Training Data Directory: 5/17, Step: 120/195
Training Data Directory: 5/17, Step: 130/195
Training Data Directory: 5/17, Step: 140/195
Training Data Directory: 5/17, Step: 150/195
Training Data Directory: 5/17, Step: 160/195
Training Data Directory: 5/17, Step: 170/195
Training Data Directory: 5/17, Step: 180/195
Training Data Directory: 5/17, Step: 190/195
Training Data Directory: 6/17, Step: 0/364
Training Data Directory: 6/17, Step: 10/364
Training Data Directory: 6/17, Step: 20/364
Training Data Directory: 6/17, Step: 30/364
Training Data Directory: 6/17, Step: 40/364
Training Data Directory: 6/17, Step: 50/364
Training Data Directory: 6/17, Step: 60/364
Training Data Directory: 6/17, Step: 70/364
Training Data Directory: 6/17, Step: 80/364
Training Data Directory: 6/17, Step: 90/364
Training Data Directory: 6/17, Step: 100/364
Training Data Directory: 6/17, Step: 110/364
Training Data Directory: 6/17, Step: 120/364
Training Data Directory: 6/17, Step: 130/364
Training Data Directory: 6/17, Step: 140/364
Training Data Directory: 6/17, Step: 150/364
Training Data Directory: 6/17, Step: 160/364
Training Data Directory: 6/17, Step: 170/364
Training Data Directory: 6/17, Step: 180/364
Training Data Directory: 6/17, Step: 190/364
Training Data Directory: 6/17, Step: 200/364
Training Data Directory: 6/17, Step: 210/364
Training Data Directory: 6/17, Step: 220/364
Training Data Directory: 6/17, Step: 230/364
Training Data Directory: 6/17, Step: 240/364
Training Data Directory: 6/17, Step: 250/364
Training Data Directory: 6/17, Step: 260/364
Training Data Directory: 6/17, Step: 270/364
Training Data Directory: 6/17, Step: 280/364
Training Data Directory: 6/17, Step: 290/364
Training Data Directory: 6/17, Step: 300/364
Training Data Directory: 6/17, Step: 310/364
Training Data Directory: 6/17, Step: 320/364
Training Data Directory: 6/17, Step: 330/364
Training Data Directory: 6/17, Step: 340/364
Training Data Directory: 6/17, Step: 350/364
Training Data Directory: 6/17, Step: 360/364
Training Data Directory: 7/17, Step: 0/93
Training Data Directory: 7/17, Step: 10/93
Training Data Directory: 7/17, Step: 20/93
Training Data Directory: 7/17, Step: 30/93
Training Data Directory: 7/17, Step: 40/93
Training Data Directory: 7/17, Step: 50/93
Training Data Directory: 7/17, Step: 60/93
Training Data Directory: 7/17, Step: 70/93
Training Data Directory: 7/17, Step: 80/93
Training Data Directory: 7/17, Step: 90/93
Training Data Directory: 8/17, Step: 0/98
Training Data Directory: 8/17, Step: 10/98
Training Data Directory: 8/17, Step: 20/98
Training Data Directory: 8/17, Step: 30/98
Training Data Directory: 8/17, Step: 40/98
Training Data Directory: 8/17, Step: 50/98
Training Data Directory: 8/17, Step: 60/98
Training Data Directory: 8/17, Step: 70/98
Training Data Directory: 8/17, Step: 80/98
Training Data Directory: 8/17, Step: 90/98
Training Data Directory: 9/17, Step: 0/195
Training Data Directory: 9/17, Step: 10/195
Training Data Directory: 9/17, Step: 20/195
Training Data Directory: 9/17, Step: 30/195
Training Data Directory: 9/17, Step: 40/195
Training Data Directory: 9/17, Step: 50/195
Training Data Directory: 9/17, Step: 60/195
Training Data Directory: 9/17, Step: 70/195
Training Data Directory: 9/17, Step: 80/195
Training Data Directory: 9/17, Step: 90/195
Training Data Directory: 9/17, Step: 100/195
Training Data Directory: 9/17, Step: 110/195
Training Data Directory: 9/17, Step: 120/195
Training Data Directory: 9/17, Step: 130/195
Training Data Directory: 9/17, Step: 140/195
Training Data Directory: 9/17, Step: 150/195
Training Data Directory: 9/17, Step: 160/195
Training Data Directory: 9/17, Step: 170/195
Training Data Directory: 9/17, Step: 180/195
Training Data Directory: 9/17, Step: 190/195
Training Data Directory: 10/17, Step: 0/247
Training Data Directory: 10/17, Step: 10/247
Training Data Directory: 10/17, Step: 20/247
Training Data Directory: 10/17, Step: 30/247
Training Data Directory: 10/17, Step: 40/247
Training Data Directory: 10/17, Step: 50/247
Training Data Directory: 10/17, Step: 60/247
Training Data Directory: 10/17, Step: 70/247
Training Data Directory: 10/17, Step: 80/247
Training Data Directory: 10/17, Step: 90/247
Training Data Directory: 10/17, Step: 100/247
Training Data Directory: 10/17, Step: 110/247
Training Data Directory: 10/17, Step: 120/247
Training Data Directory: 10/17, Step: 130/247
Training Data Directory: 10/17, Step: 140/247
Training Data Directory: 10/17, Step: 150/247
Training Data Directory: 10/17, Step: 160/247
Training Data Directory: 10/17, Step: 170/247
Training Data Directory: 10/17, Step: 180/247
Training Data Directory: 10/17, Step: 190/247
Training Data Directory: 10/17, Step: 200/247
Training Data Directory: 10/17, Step: 210/247
Training Data Directory: 10/17, Step: 220/247
Training Data Directory: 10/17, Step: 230/247
Training Data Directory: 10/17, Step: 240/247
Training Data Directory: 11/17, Step: 0/108
Training Data Directory: 11/17, Step: 10/108
Training Data Directory: 11/17, Step: 20/108
Training Data Directory: 11/17, Step: 30/108
Training Data Directory: 11/17, Step: 40/108
Training Data Directory: 11/17, Step: 50/108
Training Data Directory: 11/17, Step: 60/108
Training Data Directory: 11/17, Step: 70/108
Training Data Directory: 11/17, Step: 80/108
Training Data Directory: 11/17, Step: 90/108
Training Data Directory: 11/17, Step: 100/108
Training Data Directory: 12/17, Step: 0/220
Training Data Directory: 12/17, Step: 10/220
Training Data Directory: 12/17, Step: 20/220
Training Data Directory: 12/17, Step: 30/220
Training Data Directory: 12/17, Step: 40/220
Training Data Directory: 12/17, Step: 50/220
Training Data Directory: 12/17, Step: 60/220
Training Data Directory: 12/17, Step: 70/220
Training Data Directory: 12/17, Step: 80/220
Training Data Directory: 12/17, Step: 90/220
Training Data Directory: 12/17, Step: 100/220
Training Data Directory: 12/17, Step: 110/220
Training Data Directory: 12/17, Step: 120/220
Training Data Directory: 12/17, Step: 130/220
Training Data Directory: 12/17, Step: 140/220
Training Data Directory: 12/17, Step: 150/220
Training Data Directory: 12/17, Step: 160/220
Training Data Directory: 12/17, Step: 170/220
Training Data Directory: 12/17, Step: 180/220
Training Data Directory: 12/17, Step: 190/220
Training Data Directory: 12/17, Step: 200/220
Training Data Directory: 12/17, Step: 210/220
Training Data Directory: 12/17, Step: 220/220
Training Data Directory: 13/17, Step: 0/84
Training Data Directory: 13/17, Step: 10/84
Training Data Directory: 13/17, Step: 20/84
Training Data Directory: 13/17, Step: 30/84
Training Data Directory: 13/17, Step: 40/84
Training Data Directory: 13/17, Step: 50/84
Training Data Directory: 13/17, Step: 60/84
Training Data Directory: 13/17, Step: 70/84
Training Data Directory: 13/17, Step: 80/84
Training Data Directory: 14/17, Step: 0/153
Training Data Directory: 14/17, Step: 10/153
Training Data Directory: 14/17, Step: 20/153
Training Data Directory: 14/17, Step: 30/153
Training Data Directory: 14/17, Step: 40/153
Training Data Directory: 14/17, Step: 50/153
Training Data Directory: 14/17, Step: 60/153
Training Data Directory: 14/17, Step: 70/153
Training Data Directory: 14/17, Step: 80/153
Training Data Directory: 14/17, Step: 90/153
Training Data Directory: 14/17, Step: 100/153
Training Data Directory: 14/17, Step: 110/153
Training Data Directory: 14/17, Step: 120/153
Training Data Directory: 14/17, Step: 130/153
Training Data Directory: 14/17, Step: 140/153
Training Data Directory: 14/17, Step: 150/153
Training Data Directory: 15/17, Step: 0/315
Training Data Directory: 15/17, Step: 10/315
Training Data Directory: 15/17, Step: 20/315
Training Data Directory: 15/17, Step: 30/315
Training Data Directory: 15/17, Step: 40/315
Training Data Directory: 15/17, Step: 50/315
Training Data Directory: 15/17, Step: 60/315
Training Data Directory: 15/17, Step: 70/315
Training Data Directory: 15/17, Step: 80/315
Training Data Directory: 15/17, Step: 90/315
Training Data Directory: 15/17, Step: 100/315
Training Data Directory: 15/17, Step: 110/315
Training Data Directory: 15/17, Step: 120/315
Training Data Directory: 15/17, Step: 130/315
Training Data Directory: 15/17, Step: 140/315
Training Data Directory: 15/17, Step: 150/315
Training Data Directory: 15/17, Step: 160/315
Training Data Directory: 15/17, Step: 170/315
Training Data Directory: 15/17, Step: 180/315
Training Data Directory: 15/17, Step: 190/315
Training Data Directory: 15/17, Step: 200/315
Training Data Directory: 15/17, Step: 210/315
Training Data Directory: 15/17, Step: 220/315
Training Data Directory: 15/17, Step: 230/315
Training Data Directory: 15/17, Step: 240/315
Training Data Directory: 15/17, Step: 250/315
Training Data Directory: 15/17, Step: 260/315
Training Data Directory: 15/17, Step: 270/315
Training Data Directory: 15/17, Step: 280/315
Training Data Directory: 15/17, Step: 290/315
Training Data Directory: 15/17, Step: 300/315
Training Data Directory: 15/17, Step: 310/315
Training Data Directory: 16/17, Step: 0/95
Training Data Directory: 16/17, Step: 10/95
Training Data Directory: 16/17, Step: 20/95
Training Data Directory: 16/17, Step: 30/95
Training Data Directory: 16/17, Step: 40/95
Training Data Directory: 16/17, Step: 50/95
Training Data Directory: 16/17, Step: 60/95
Training Data Directory: 16/17, Step: 70/95
Training Data Directory: 16/17, Step: 80/95
Training Data Directory: 16/17, Step: 90/95
Training Data Directory: 17/17, Step: 0/173
Training Data Directory: 17/17, Step: 10/173
Training Data Directory: 17/17, Step: 20/173
Training Data Directory: 17/17, Step: 30/173
Training Data Directory: 17/17, Step: 40/173
Training Data Directory: 17/17, Step: 50/173
Training Data Directory: 17/17, Step: 60/173
Training Data Directory: 17/17, Step: 70/173
Training Data Directory: 17/17, Step: 80/173
Training Data Directory: 17/17, Step: 90/173
Training Data Directory: 17/17, Step: 100/173
Training Data Directory: 17/17, Step: 110/173
Training Data Directory: 17/17, Step: 120/173
Training Data Directory: 17/17, Step: 130/173
Training Data Directory: 17/17, Step: 140/173
Training Data Directory: 17/17, Step: 150/173
Training Data Directory: 17/17, Step: 160/173
Training Data Directory: 17/17, Step: 170/173
In [7]:
# LOAD DATA
# Check Size of Data
image_feature_size=int(len(x_train))
print("Feature Size:", image_feature_size)

# Revert to the Original Image Shapes
x_train = np.array(x_train).reshape(2975, 144, 144, 3)
print("Feature Size:", x_train.shape)

# Check Size of Data
label_feature_size=int(len(y_train))
print("Feature Size:", label_feature_size)

# Revert to the Original Image Shapes
y_train = np.array(y_train).reshape(2975, 144, 144, 1)
print("Feature Size:", y_train.shape)
Feature Size: 185068800
Feature Size: (2975, 144, 144, 3)
Feature Size: 61689600
Feature Size: (2975, 144, 144, 1)
In [8]:
# LOAD DATA
# Plot Images
index = random.randint(0,2975)
fig, (axis1, axis2) = plt.subplots(1,2,figsize=(5,5))
axis1.imshow(x_train[index])
axis2.imshow(np.array(y_train[index]).reshape(144,144))
axis1.set_title('Original Image:',fontsize=10)
axis2.set_title('Label Image:',fontsize=10)
Out[8]:
Text(0.5,1,'Label Image:')
In [9]:
# LOAD DATA
# Load Training Data Directories
valid_image_dir = "Data/Images/Val/"
valid_label_dir = "Data/Ground-Truth/Val/"
valid_dirs = ["frankfurt/", "lindau/", "munster/"]
In [37]:
# LOAD DATA
#Create Empty Array for Validation Images
x_valid = []
y_valid = []

# Create a For Loop to Import Validation Data
for dir_step, dir in enumerate(valid_dirs):
    # Load Current Directory
    current_dir = valid_image_dir + dir
    
    # Load Filenames in Current Directory
    filenames = os.listdir(current_dir)
    
    # Create For Loop for Import Images from Current Directory
    for step, filename in enumerate(filenames):
        # Print Current Loading Status
        if step % 10 == 0:
            print("Validation Data Directory: %d/%d, Step: %d/%d" % (dir_step, len(valid_dirs)-1,step, \
                                                                     len(filenames)-1))
        
        # Check Image ID
        image_id = filename.split("_left")[0]

        # Read the Image
        image_path = current_dir + filename
        image = cv2.imread(image_path)
        
        # Randomly Select Gamma
        gamma = random.choice(gamma_array)
    
        # Perform Gamma Correction
        #image = adjust_gamma(image, gamma = gamma)

        # Resize the Image
        image_small = cv2.resize(image, (144,144), interpolation=cv2.INTER_NEAREST)
        
        # Flatten Image and Append 
        image_flat = image_small.flatten().tolist()
        x_valid += image_flat

        # Read Label Image
        label_image_path = valid_label_dir + dir + image_id + "_gtFine_color.png"
        label_image = cv2.imread(label_image_path)
        
        # Resize the Label Image
        label_image_small = cv2.resize(label_image, (144,144), interpolation=cv2.INTER_NEAREST)

        # Convert to Binary Image
        binary_label_image = create_binary_labels(label_image_small)
        
        # Flatten Image and Append 
        label_image_flat = binary_label_image.flatten().tolist()
        y_valid += label_image_flat
Validation Data Directory: 0/2, Step: 0/266
Validation Data Directory: 0/2, Step: 10/266
Validation Data Directory: 0/2, Step: 20/266
Validation Data Directory: 0/2, Step: 30/266
Validation Data Directory: 0/2, Step: 40/266
Validation Data Directory: 0/2, Step: 50/266
Validation Data Directory: 0/2, Step: 60/266
Validation Data Directory: 0/2, Step: 70/266
Validation Data Directory: 0/2, Step: 80/266
Validation Data Directory: 0/2, Step: 90/266
Validation Data Directory: 0/2, Step: 100/266
Validation Data Directory: 0/2, Step: 110/266
Validation Data Directory: 0/2, Step: 120/266
Validation Data Directory: 0/2, Step: 130/266
Validation Data Directory: 0/2, Step: 140/266
Validation Data Directory: 0/2, Step: 150/266
Validation Data Directory: 0/2, Step: 160/266
Validation Data Directory: 0/2, Step: 170/266
Validation Data Directory: 0/2, Step: 180/266
Validation Data Directory: 0/2, Step: 190/266
Validation Data Directory: 0/2, Step: 200/266
Validation Data Directory: 0/2, Step: 210/266
Validation Data Directory: 0/2, Step: 220/266
Validation Data Directory: 0/2, Step: 230/266
Validation Data Directory: 0/2, Step: 240/266
Validation Data Directory: 0/2, Step: 250/266
Validation Data Directory: 0/2, Step: 260/266
Validation Data Directory: 1/2, Step: 0/58
Validation Data Directory: 1/2, Step: 10/58
Validation Data Directory: 1/2, Step: 20/58
Validation Data Directory: 1/2, Step: 30/58
Validation Data Directory: 1/2, Step: 40/58
Validation Data Directory: 1/2, Step: 50/58
Validation Data Directory: 2/2, Step: 0/173
Validation Data Directory: 2/2, Step: 10/173
Validation Data Directory: 2/2, Step: 20/173
Validation Data Directory: 2/2, Step: 30/173
Validation Data Directory: 2/2, Step: 40/173
Validation Data Directory: 2/2, Step: 50/173
Validation Data Directory: 2/2, Step: 60/173
Validation Data Directory: 2/2, Step: 70/173
Validation Data Directory: 2/2, Step: 80/173
Validation Data Directory: 2/2, Step: 90/173
Validation Data Directory: 2/2, Step: 100/173
Validation Data Directory: 2/2, Step: 110/173
Validation Data Directory: 2/2, Step: 120/173
Validation Data Directory: 2/2, Step: 130/173
Validation Data Directory: 2/2, Step: 140/173
Validation Data Directory: 2/2, Step: 150/173
Validation Data Directory: 2/2, Step: 160/173
Validation Data Directory: 2/2, Step: 170/173
In [39]:
# LOAD DATA
# Check Size of Data
image_feature_size=int(len(x_valid))
print("Feature Size:", image_feature_size)

# Revert to the Original Image Shapes
x_valid = np.array(x_valid).reshape(500, 144, 144, 3)
print("Feature Size:", x_valid.shape)

# Check Size of Data
label_feature_size=int(len(y_valid))
print("Feature Size:", label_feature_size)

# Revert to the Original Image Shapes
y_valid = np.array(y_valid).reshape(500, 144, 144, 1)
print("Feature Size:", y_valid.shape)
Feature Size: 31104000
Feature Size: (500, 144, 144, 3)
Feature Size: 10368000
Feature Size: (500, 144, 144, 1)

Create Model

In [12]:
# CREATE MODEL
# Define a Function for Convolutional Block
def convolutional_block(input_tensor, depth, kernel, strides=(1, 1), padding="SAME", batchnorm = False):
    layer = tf.layers.conv2d(input_tensor, \
                             filters = depth, \
                             kernel_size = kernel, \
                             strides = strides, \
                             padding = padding, \
                             activation = tf.nn.relu)
    layer = tf.layers.dropout(layer, rate = 0.20, training = True)
    if batchnorm:
        layer = tf.layers.batch_normalization(layer, training = False)
        
    return layer
In [13]:
# CREATE MODEL
# Define a Function for Deconvolutional Block
def deconvolutional_block(input_tensor, filter_size, output_size, \
             out_channels, in_channels, \
             strides = [1, 1, 1, 1], name = False):
    input_shape = tf.shape(input_tensor)
    batch_size = input_shape[0]
    out_shape = tf.stack([batch_size, output_size, output_size, out_channels])
    filter_shape = [filter_size, filter_size, out_channels, in_channels]
    weights = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.05))
    if name: 
        layer = tf.nn.conv2d_transpose(input_tensor, weights, out_shape, strides, padding = 'SAME', name = 'y_pred')
    else:
        layer = tf.nn.conv2d_transpose(input_tensor, weights, out_shape, strides, padding = 'SAME')
    return layer
In [14]:
# CREATE MODEL
# Define a Function for Architecture
def UNet(x, n_filters):
    c1_a = convolutional_block(x, 16 * n_filters, 3) 
    c1 = convolutional_block(c1_a, 16 * n_filters, 3) 
    c1 = tf.nn.max_pool(value=c1,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='SAME')

    c2_a = convolutional_block(c1, 32 * n_filters, 3) 
    c2 = convolutional_block(c2_a, 32 * n_filters, 3)
    c2 = tf.nn.max_pool(value=c2,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='SAME')

    c3_a = convolutional_block(c2, 64 * n_filters, 3) 
    c3 = convolutional_block(c3_a, 64 * n_filters, 3) 
    c3 = tf.nn.max_pool(value=c3,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='SAME')


    c4_a = convolutional_block(c3, 128 * n_filters, 3) 
    c4 = convolutional_block(c4_a, 128 * n_filters, 3) 
    c4 = tf.nn.max_pool(value=c4,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='SAME')

    c5_a = convolutional_block(c4, 256 * n_filters, 3) 
    c5 = convolutional_block(c5_a, 256 * n_filters, 3) 

    d1 = deconvolutional_block(c5, 2, 18, 128 * n_filters, 256 * n_filters, strides=[1, 2, 2, 1]) 
    cat1 = tf.concat([d1, c4_a], 3)
    cat1 = convolutional_block(cat1, 128 * n_filters, 3) 
    cat1 = convolutional_block(cat1, 128 * n_filters, 3) 

    d2 = deconvolutional_block(cat1, 2, 36, 64 * n_filters, 128 * n_filters, strides=[1, 2, 2, 1]) 
    cat2 = tf.concat([d2, c3_a], 3)
    cat2 = convolutional_block(cat2, 64 * n_filters, 3) 
    cat2 = convolutional_block(cat2, 64 * n_filters, 3) 

    d3 = deconvolutional_block(cat2, 2, 72, 32 * n_filters, 64 * n_filters, strides=[1, 2, 2, 1]) 
    cat3 = tf.concat([d3, c2_a], 3)
    cat3 = convolutional_block(cat3, 32 * n_filters, 3) 
    cat3 = convolutional_block(cat3, 32 * n_filters, 3) 

    d4 = deconvolutional_block(cat3, 2, 144, 16 * n_filters, 32 * n_filters, strides=[1, 2, 2, 1]) 
    cat4 = tf.concat([d4, c1_a], 3)
    cat4 = convolutional_block(cat4, 16 * n_filters, 3) 
    cat4 = convolutional_block(cat4, 16 * n_filters, 3) 

    y_pred = deconvolutional_block(d4, 1, 144, 1, 16 * n_filters, name = True)
    
    return y_pred

Set GPU Parameters

In [15]:
# SET GPU SETTINGS
# Configure
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.per_process_gpu_memory_fraction = 0.80

Train

In [16]:
# TRAIN
# Set Placeholder
x = tf.placeholder(tf.float32, shape=[None, 144, 144, 3], name = 'x')
y = tf.placeholder(tf.float32, [None, 144, 144, 1], name = 'y')

print("Setting Placeholder")
Setting Placeholder
In [43]:
# TRAIN
# Set Training Pipeline
rate = 0.00001
batch_size = 8
epochs = 1000
y_pred = UNet(x,2)
loss_operation = tf.losses.mean_squared_error(y, y_pred)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)

print("Setting Training Pipeline")
Setting Training Pipeline
In [44]:
# TRAIN  
# Initialize Evaluation
correct_prediction = tf.losses.mean_squared_error(y, y_pred)

# Define Evaluation 
def evaluate(x_data, y_data):
    num_examples = len(x_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, batch_size):
        batch_x, batch_y = x_data[offset:offset + batch_size], y_data[offset:offset + batch_size]
        accuracy = sess.run(correct_prediction, feed_dict={x:batch_x, y:batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy/num_examples
In [45]:
# TRAIN 
# Initialize Saver
saver = tf.train.Saver()
In [47]:
# TRAIN 
# Initialize 
training_loss_array = []
validation_loss_array = []

# Use Session to Test Model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(x_train)
    print("Training...")
    for i in range(epochs):
        x_train, y_train = shuffle(x_train, y_train)
        for offset in range(0, num_examples, batch_size):
            end = offset + batch_size
            batch_x, batch_y = x_train[offset:end], y_train[offset:end]
            loss_value, _ = sess.run([loss_operation, training_operation], feed_dict={x:batch_x, y:batch_y}) 
        training_loss_array.append(loss_value)
        validation_loss = evaluate(x_valid, y_valid)
        validation_loss_array.append(validation_loss)
        saver.save(sess,'./Model-Tensorflow/unet')
        print("Epoch {} ...".format(i+1))
        print("Training Loss = {:.3f}".format(loss_value)) 
        print("Validation Loss = {:.3f}".format(validation_loss))
        print("Model Saved")
Training...
Epoch 1 ...
Training Loss = 8115.909
Validation Loss = 8061.176
Model Saved
Epoch 2 ...
Training Loss = 7015.675
Validation Loss = 7088.119
Model Saved
Epoch 3 ...
Training Loss = 6530.034
Validation Loss = 6620.042
Model Saved
Epoch 4 ...
Training Loss = 5882.972
Validation Loss = 6165.325
Model Saved
Epoch 5 ...
Training Loss = 5994.021
Validation Loss = 6136.030
Model Saved
Epoch 6 ...
Training Loss = 6114.007
Validation Loss = 6396.058
Model Saved
Epoch 7 ...
Training Loss = 5824.371
Validation Loss = 5883.358
Model Saved
Epoch 8 ...
Training Loss = 5912.016
Validation Loss = 5746.205
Model Saved
Epoch 9 ...
Training Loss = 4954.975
Validation Loss = 5786.265
Model Saved
Epoch 10 ...
Training Loss = 5635.596
Validation Loss = 5645.694
Model Saved
Epoch 11 ...
Training Loss = 5130.091
Validation Loss = 5559.648
Model Saved
Epoch 12 ...
Training Loss = 5246.310
Validation Loss = 5394.859
Model Saved
Epoch 13 ...
Training Loss = 5480.826
Validation Loss = 5243.181
Model Saved
Epoch 14 ...
Training Loss = 4477.652
Validation Loss = 5374.365
Model Saved
Epoch 15 ...
Training Loss = 4722.542
Validation Loss = 5431.912
Model Saved
Epoch 16 ...
Training Loss = 3682.256
Validation Loss = 5137.163
Model Saved
Epoch 17 ...
Training Loss = 6448.907
Validation Loss = 5236.298
Model Saved
Epoch 18 ...
Training Loss = 4646.665
Validation Loss = 5151.672
Model Saved
Epoch 19 ...
Training Loss = 6123.764
Validation Loss = 5259.825
Model Saved
Epoch 20 ...
Training Loss = 3647.377
Validation Loss = 4968.549
Model Saved
Epoch 21 ...
Training Loss = 3995.184
Validation Loss = 4684.258
Model Saved
Epoch 22 ...
Training Loss = 4291.634
Validation Loss = 4763.340
Model Saved
Epoch 23 ...
Training Loss = 4619.424
Validation Loss = 4845.768
Model Saved
Epoch 24 ...
Training Loss = 3971.774
Validation Loss = 4692.141
Model Saved
Epoch 25 ...
Training Loss = 3518.801
Validation Loss = 4534.869
Model Saved
Epoch 26 ...
Training Loss = 3547.081
Validation Loss = 4853.986
Model Saved
Epoch 27 ...
Training Loss = 3918.707
Validation Loss = 4655.716
Model Saved
Epoch 28 ...
Training Loss = 4953.423
Validation Loss = 4507.304
Model Saved
Epoch 29 ...
Training Loss = 3568.020
Validation Loss = 4664.960
Model Saved
Epoch 30 ...
Training Loss = 3615.693
Validation Loss = 4404.262
Model Saved
Epoch 31 ...
Training Loss = 3202.483
Validation Loss = 4691.241
Model Saved
Epoch 32 ...
Training Loss = 3485.722
Validation Loss = 4528.076
Model Saved
Epoch 33 ...
Training Loss = 5122.336
Validation Loss = 4391.011
Model Saved
Epoch 34 ...
Training Loss = 3240.069
Validation Loss = 4421.131
Model Saved
Epoch 35 ...
Training Loss = 2881.354
Validation Loss = 4300.292
Model Saved
Epoch 36 ...
Training Loss = 3686.276
Validation Loss = 4294.951
Model Saved
Epoch 37 ...
Training Loss = 3769.054
Validation Loss = 4347.501
Model Saved
Epoch 38 ...
Training Loss = 3954.082
Validation Loss = 4204.590
Model Saved
Epoch 39 ...
Training Loss = 3569.336
Validation Loss = 4143.579
Model Saved
Epoch 40 ...
Training Loss = 4176.636
Validation Loss = 4168.105
Model Saved
Epoch 41 ...
Training Loss = 2568.316
Validation Loss = 3906.833
Model Saved
Epoch 42 ...
Training Loss = 3822.117
Validation Loss = 4280.529
Model Saved
Epoch 43 ...
Training Loss = 3293.089
Validation Loss = 4045.938
Model Saved
Epoch 44 ...
Training Loss = 3003.358
Validation Loss = 4038.832
Model Saved
Epoch 45 ...
Training Loss = 4261.245
Validation Loss = 4085.227
Model Saved
Epoch 46 ...
Training Loss = 3872.030
Validation Loss = 4030.179
Model Saved
Epoch 47 ...
Training Loss = 3530.069
Validation Loss = 3986.655
Model Saved
Epoch 48 ...
Training Loss = 3320.287
Validation Loss = 4039.321
Model Saved
Epoch 49 ...
Training Loss = 3960.951
Validation Loss = 3976.145
Model Saved
Epoch 50 ...
Training Loss = 2560.780
Validation Loss = 3973.422
Model Saved
Epoch 51 ...
Training Loss = 2133.022
Validation Loss = 3880.440
Model Saved
Epoch 52 ...
Training Loss = 4297.377
Validation Loss = 3990.206
Model Saved
Epoch 53 ...
Training Loss = 2458.333
Validation Loss = 4122.097
Model Saved
Epoch 54 ...
Training Loss = 2942.776
Validation Loss = 3840.495
Model Saved
Epoch 55 ...
Training Loss = 2562.296
Validation Loss = 3825.609
Model Saved
Epoch 56 ...
Training Loss = 2872.479
Validation Loss = 3868.083
Model Saved
Epoch 57 ...
Training Loss = 2785.908
Validation Loss = 3728.138
Model Saved
Epoch 58 ...
Training Loss = 3095.187
Validation Loss = 3802.575
Model Saved
Epoch 59 ...
Training Loss = 2944.042
Validation Loss = 3709.611
Model Saved
Epoch 60 ...
Training Loss = 3212.961
Validation Loss = 3795.436
Model Saved
Epoch 61 ...
Training Loss = 3825.192
Validation Loss = 3608.640
Model Saved
Epoch 62 ...
Training Loss = 2577.359
Validation Loss = 3645.793
Model Saved
Epoch 63 ...
Training Loss = 3247.588
Validation Loss = 3786.029
Model Saved
Epoch 64 ...
Training Loss = 3220.998
Validation Loss = 3809.714
Model Saved
Epoch 65 ...
Training Loss = 2992.078
Validation Loss = 3721.089
Model Saved
Epoch 66 ...
Training Loss = 2643.889
Validation Loss = 3571.174
Model Saved
Epoch 67 ...
Training Loss = 2707.195
Validation Loss = 3500.516
Model Saved
Epoch 68 ...
Training Loss = 3393.010
Validation Loss = 3626.966
Model Saved
Epoch 69 ...
Training Loss = 3789.363
Validation Loss = 3601.250
Model Saved
Epoch 70 ...
Training Loss = 2739.874
Validation Loss = 3457.504
Model Saved
Epoch 71 ...
Training Loss = 2263.032
Validation Loss = 3751.623
Model Saved
Epoch 72 ...
Training Loss = 2882.195
Validation Loss = 3385.275
Model Saved
Epoch 73 ...
Training Loss = 2710.301
Validation Loss = 3659.490
Model Saved
Epoch 74 ...
Training Loss = 2408.080
Validation Loss = 3486.405
Model Saved
Epoch 75 ...
Training Loss = 4144.994
Validation Loss = 3390.508
Model Saved
Epoch 76 ...
Training Loss = 2980.751
Validation Loss = 3372.716
Model Saved
Epoch 77 ...
Training Loss = 1678.098
Validation Loss = 3382.906
Model Saved
Epoch 78 ...
Training Loss = 2022.906
Validation Loss = 3467.892
Model Saved
Epoch 79 ...
Training Loss = 4665.613
Validation Loss = 3480.552
Model Saved
Epoch 80 ...
Training Loss = 2125.892
Validation Loss = 3314.540
Model Saved
Epoch 81 ...
Training Loss = 2544.913
Validation Loss = 3308.081
Model Saved
Epoch 82 ...
Training Loss = 2053.390
Validation Loss = 3405.872
Model Saved
Epoch 83 ...
Training Loss = 2146.113
Validation Loss = 3304.479
Model Saved
Epoch 84 ...
Training Loss = 2801.354
Validation Loss = 3259.956
Model Saved
Epoch 85 ...
Training Loss = 2491.779
Validation Loss = 3263.414
Model Saved
Epoch 86 ...
Training Loss = 1936.419
Validation Loss = 3331.421
Model Saved
Epoch 87 ...
Training Loss = 2723.344
Validation Loss = 3314.741
Model Saved
Epoch 88 ...
Training Loss = 2716.033
Validation Loss = 3613.289
Model Saved
Epoch 89 ...
Training Loss = 2846.285
Validation Loss = 3325.653
Model Saved
Epoch 90 ...
Training Loss = 3172.366
Validation Loss = 3181.849
Model Saved
Epoch 91 ...
Training Loss = 2070.292
Validation Loss = 3368.007
Model Saved
Epoch 92 ...
Training Loss = 2941.291
Validation Loss = 3212.184
Model Saved
Epoch 93 ...
Training Loss = 1999.842
Validation Loss = 3643.468
Model Saved
Epoch 94 ...
Training Loss = 2808.573
Validation Loss = 3271.358
Model Saved
Epoch 95 ...
Training Loss = 1840.978
Validation Loss = 3190.067
Model Saved
Epoch 96 ...
Training Loss = 2832.325
Validation Loss = 3175.934
Model Saved
Epoch 97 ...
Training Loss = 2116.235
Validation Loss = 3127.553
Model Saved
Epoch 98 ...
Training Loss = 3438.344
Validation Loss = 3200.332
Model Saved
Epoch 99 ...
Training Loss = 2113.271
Validation Loss = 3049.898
Model Saved
Epoch 100 ...
Training Loss = 2129.646
Validation Loss = 3141.236
Model Saved
Epoch 101 ...
Training Loss = 2735.726
Validation Loss = 3216.832
Model Saved
Epoch 102 ...
Training Loss = 2291.847
Validation Loss = 3285.732
Model Saved
Epoch 103 ...
Training Loss = 2100.533
Validation Loss = 3373.060
Model Saved
Epoch 104 ...
Training Loss = 2100.783
Validation Loss = 3157.548
Model Saved
Epoch 105 ...
Training Loss = 2096.663
Validation Loss = 3088.453
Model Saved
Epoch 106 ...
Training Loss = 2355.803
Validation Loss = 3256.833
Model Saved
Epoch 107 ...
Training Loss = 2176.678
Validation Loss = 3127.846
Model Saved
Epoch 108 ...
Training Loss = 2003.631
Validation Loss = 3089.047
Model Saved
Epoch 109 ...
Training Loss = 3457.162
Validation Loss = 3189.981
Model Saved
Epoch 110 ...
Training Loss = 1981.391
Validation Loss = 3148.329
Model Saved
Epoch 111 ...
Training Loss = 1761.013
Validation Loss = 3049.689
Model Saved
Epoch 112 ...
Training Loss = 1878.154
Validation Loss = 3359.321
Model Saved
Epoch 113 ...
Training Loss = 2207.286
Validation Loss = 3101.209
Model Saved
Epoch 114 ...
Training Loss = 2177.911
Validation Loss = 3006.559
Model Saved
Epoch 115 ...
Training Loss = 2138.729
Validation Loss = 3029.015
Model Saved
Epoch 116 ...
Training Loss = 1828.607
Validation Loss = 3110.853
Model Saved
Epoch 117 ...
Training Loss = 2841.248
Validation Loss = 3159.888
Model Saved
Epoch 118 ...
Training Loss = 1853.528
Validation Loss = 3177.508
Model Saved
Epoch 119 ...
Training Loss = 3577.321
Validation Loss = 3044.897
Model Saved
Epoch 120 ...
Training Loss = 2046.587
Validation Loss = 3064.564
Model Saved
Epoch 121 ...
Training Loss = 2010.929
Validation Loss = 3087.514
Model Saved
Epoch 122 ...
Training Loss = 2716.128
Validation Loss = 3026.648
Model Saved
Epoch 123 ...
Training Loss = 1499.190
Validation Loss = 3040.274
Model Saved
Epoch 124 ...
Training Loss = 1709.376
Validation Loss = 3006.392
Model Saved
Epoch 125 ...
Training Loss = 3414.893
Validation Loss = 3127.444
Model Saved
Epoch 126 ...
Training Loss = 1309.609
Validation Loss = 3101.114
Model Saved
Epoch 127 ...
Training Loss = 1875.852
Validation Loss = 2982.476
Model Saved
Epoch 128 ...
Training Loss = 1947.313
Validation Loss = 3057.566
Model Saved
Epoch 129 ...
Training Loss = 2848.375
Validation Loss = 2995.723
Model Saved
Epoch 130 ...
Training Loss = 1716.880
Validation Loss = 3129.330
Model Saved
Epoch 131 ...
Training Loss = 1285.880
Validation Loss = 3054.022
Model Saved
Epoch 132 ...
Training Loss = 1745.177
Validation Loss = 3039.300
Model Saved
Epoch 133 ...
Training Loss = 2320.844
Validation Loss = 2941.298
Model Saved
Epoch 134 ...
Training Loss = 2395.403
Validation Loss = 3019.949
Model Saved
Epoch 135 ...
Training Loss = 1434.375
Validation Loss = 3035.573
Model Saved
Epoch 136 ...
Training Loss = 1773.179
Validation Loss = 3036.836
Model Saved
Epoch 137 ...
Training Loss = 1329.905
Validation Loss = 3060.402
Model Saved
Epoch 138 ...
Training Loss = 2152.404
Validation Loss = 2960.069
Model Saved
Epoch 139 ...
Training Loss = 1712.843
Validation Loss = 2925.216
Model Saved
Epoch 140 ...
Training Loss = 1501.772
Validation Loss = 2981.075
Model Saved
Epoch 141 ...
Training Loss = 1571.282
Validation Loss = 2986.542
Model Saved
Epoch 142 ...
Training Loss = 1806.437
Validation Loss = 2925.577
Model Saved
Epoch 143 ...
Training Loss = 1345.120
Validation Loss = 3010.721
Model Saved
Epoch 144 ...
Training Loss = 1369.127
Validation Loss = 3097.329
Model Saved
Epoch 145 ...
Training Loss = 2000.191
Validation Loss = 3024.628
Model Saved
Epoch 146 ...
Training Loss = 1406.670
Validation Loss = 3055.884
Model Saved
Epoch 147 ...
Training Loss = 2302.042
Validation Loss = 2881.643
Model Saved
Epoch 148 ...
Training Loss = 1513.350
Validation Loss = 2936.068
Model Saved
Epoch 149 ...
Training Loss = 2460.472
Validation Loss = 3056.973
Model Saved
Epoch 150 ...
Training Loss = 1966.633
Validation Loss = 2917.211
Model Saved
Epoch 151 ...
Training Loss = 2092.123
Validation Loss = 2959.768
Model Saved
Epoch 152 ...
Training Loss = 1296.275
Validation Loss = 2924.293
Model Saved
Epoch 153 ...
Training Loss = 1406.407
Validation Loss = 3090.709
Model Saved
Epoch 154 ...
Training Loss = 3067.485
Validation Loss = 3015.626
Model Saved
Epoch 155 ...
Training Loss = 1427.105
Validation Loss = 2872.839
Model Saved
Epoch 156 ...
Training Loss = 1661.584
Validation Loss = 3269.433
Model Saved
Epoch 157 ...
Training Loss = 1366.394
Validation Loss = 2809.054
Model Saved
Epoch 158 ...
Training Loss = 1946.129
Validation Loss = 2965.054
Model Saved
Epoch 159 ...
Training Loss = 1746.595
Validation Loss = 2876.564
Model Saved
Epoch 160 ...
Training Loss = 1265.674
Validation Loss = 2933.181
Model Saved
Epoch 161 ...
Training Loss = 1487.600
Validation Loss = 2998.660
Model Saved
Epoch 162 ...
Training Loss = 1485.552
Validation Loss = 2947.870
Model Saved
Epoch 163 ...
Training Loss = 1466.001
Validation Loss = 3066.189
Model Saved
Epoch 164 ...
Training Loss = 1807.693
Validation Loss = 2938.696
Model Saved
Epoch 165 ...
Training Loss = 1440.512
Validation Loss = 2948.233
Model Saved
Epoch 166 ...
Training Loss = 1921.144
Validation Loss = 3031.092
Model Saved
Epoch 167 ...
Training Loss = 1585.833
Validation Loss = 2908.805
Model Saved
Epoch 168 ...
Training Loss = 1289.470
Validation Loss = 3068.768
Model Saved
Epoch 169 ...
Training Loss = 1833.443
Validation Loss = 2835.096
Model Saved
Epoch 170 ...
Training Loss = 1262.972
Validation Loss = 2877.089
Model Saved
Epoch 171 ...
Training Loss = 1257.236
Validation Loss = 2996.476
Model Saved
Epoch 172 ...
Training Loss = 1462.775
Validation Loss = 2927.907
Model Saved
Epoch 173 ...
Training Loss = 1917.338
Validation Loss = 2840.549
Model Saved
Epoch 174 ...
Training Loss = 1802.496
Validation Loss = 3017.227
Model Saved
Epoch 175 ...
Training Loss = 1415.712
Validation Loss = 2917.480
Model Saved
Epoch 176 ...
Training Loss = 1298.939
Validation Loss = 2878.850
Model Saved
Epoch 177 ...
Training Loss = 1707.694
Validation Loss = 3069.726
Model Saved
Epoch 178 ...
Training Loss = 1706.539
Validation Loss = 2855.688
Model Saved
Epoch 179 ...
Training Loss = 1497.474
Validation Loss = 2798.105
Model Saved
Epoch 180 ...
Training Loss = 1231.068
Validation Loss = 2940.502
Model Saved
Epoch 181 ...
Training Loss = 1755.801
Validation Loss = 2931.192
Model Saved
Epoch 182 ...
Training Loss = 1458.499
Validation Loss = 2913.355
Model Saved
Epoch 183 ...
Training Loss = 1180.446
Validation Loss = 2897.044
Model Saved
Epoch 184 ...
Training Loss = 1289.609
Validation Loss = 2828.670
Model Saved
Epoch 185 ...
Training Loss = 1275.731
Validation Loss = 2854.833
Model Saved
Epoch 186 ...
Training Loss = 1000.673
Validation Loss = 2818.610
Model Saved
Epoch 187 ...
Training Loss = 1329.736
Validation Loss = 2909.113
Model Saved
Epoch 188 ...
Training Loss = 1310.205
Validation Loss = 2900.617
Model Saved
Epoch 189 ...
Training Loss = 1460.658
Validation Loss = 2929.689
Model Saved
Epoch 190 ...
Training Loss = 1277.525
Validation Loss = 2906.803
Model Saved
Epoch 191 ...
Training Loss = 1315.061
Validation Loss = 2789.915
Model Saved
Epoch 192 ...
Training Loss = 990.482
Validation Loss = 2826.758
Model Saved
Epoch 193 ...
Training Loss = 1340.784
Validation Loss = 2835.992
Model Saved
Epoch 194 ...
Training Loss = 1223.502
Validation Loss = 2856.879
Model Saved
Epoch 195 ...
Training Loss = 920.321
Validation Loss = 2848.078
Model Saved
Epoch 196 ...
Training Loss = 977.665
Validation Loss = 2778.575
Model Saved
Epoch 197 ...
Training Loss = 1007.690
Validation Loss = 2728.919
Model Saved
Epoch 198 ...
Training Loss = 1156.713
Validation Loss = 2847.881
Model Saved
Epoch 199 ...
Training Loss = 1207.557
Validation Loss = 2752.636
Model Saved
Epoch 200 ...
Training Loss = 859.422
Validation Loss = 2777.153
Model Saved
Epoch 201 ...
Training Loss = 1035.651
Validation Loss = 2793.681
Model Saved
Epoch 202 ...
Training Loss = 1082.942
Validation Loss = 2744.573
Model Saved
Epoch 203 ...
Training Loss = 1386.311
Validation Loss = 2753.240
Model Saved
Epoch 204 ...
Training Loss = 1466.789
Validation Loss = 2733.396
Model Saved
Epoch 205 ...
Training Loss = 1188.516
Validation Loss = 2931.481
Model Saved
Epoch 206 ...
Training Loss = 1407.238
Validation Loss = 2783.541
Model Saved
Epoch 207 ...
Training Loss = 1311.409
Validation Loss = 2716.842
Model Saved
Epoch 208 ...
Training Loss = 907.644
Validation Loss = 2955.667
Model Saved
Epoch 209 ...
Training Loss = 1180.396
Validation Loss = 2828.386
Model Saved
Epoch 210 ...
Training Loss = 1510.442
Validation Loss = 2816.652
Model Saved
Epoch 211 ...
Training Loss = 1620.645
Validation Loss = 2655.511
Model Saved
Epoch 212 ...
Training Loss = 1569.659
Validation Loss = 2793.859
Model Saved
Epoch 213 ...
Training Loss = 1482.779
Validation Loss = 2818.456
Model Saved
Epoch 214 ...
Training Loss = 1392.443
Validation Loss = 2739.861
Model Saved
Epoch 215 ...
Training Loss = 1116.606
Validation Loss = 2807.918
Model Saved
Epoch 216 ...
Training Loss = 932.262
Validation Loss = 2790.832
Model Saved
Epoch 217 ...
Training Loss = 1311.586
Validation Loss = 2947.397
Model Saved
Epoch 218 ...
Training Loss = 1070.726
Validation Loss = 2726.675
Model Saved
Epoch 219 ...
Training Loss = 1588.935
Validation Loss = 2782.656
Model Saved
Epoch 220 ...
Training Loss = 1520.620
Validation Loss = 2750.934
Model Saved
Epoch 221 ...
Training Loss = 1275.166
Validation Loss = 2783.744
Model Saved
Epoch 222 ...
Training Loss = 1292.812
Validation Loss = 2936.489
Model Saved
Epoch 223 ...
Training Loss = 1178.711
Validation Loss = 2778.627
Model Saved
Epoch 224 ...
Training Loss = 1110.521
Validation Loss = 2717.898
Model Saved
Epoch 225 ...
Training Loss = 847.972
Validation Loss = 2760.225
Model Saved
Epoch 226 ...
Training Loss = 1440.607
Validation Loss = 2692.901
Model Saved
Epoch 227 ...
Training Loss = 724.826
Validation Loss = 2701.234
Model Saved
Epoch 228 ...
Training Loss = 1276.252
Validation Loss = 2615.098
Model Saved
Epoch 229 ...
Training Loss = 1131.207
Validation Loss = 2735.605
Model Saved
Epoch 230 ...
Training Loss = 1001.697
Validation Loss = 2744.221
Model Saved
Epoch 231 ...
Training Loss = 1166.003
Validation Loss = 2690.879
Model Saved
Epoch 232 ...
Training Loss = 1588.414
Validation Loss = 2834.256
Model Saved
Epoch 233 ...
Training Loss = 1062.008
Validation Loss = 2733.821
Model Saved
Epoch 234 ...
Training Loss = 1005.088
Validation Loss = 2657.469
Model Saved
Epoch 235 ...
Training Loss = 1152.707
Validation Loss = 2900.531
Model Saved
Epoch 236 ...
Training Loss = 1184.902
Validation Loss = 2725.353
Model Saved
Epoch 237 ...
Training Loss = 1230.530
Validation Loss = 2822.665
Model Saved
Epoch 238 ...
Training Loss = 1205.563
Validation Loss = 2627.833
Model Saved
Epoch 239 ...
Training Loss = 980.443
Validation Loss = 2745.689
Model Saved
Epoch 240 ...
Training Loss = 1306.021
Validation Loss = 2720.757
Model Saved
Epoch 241 ...
Training Loss = 1455.527
Validation Loss = 2701.289
Model Saved
Epoch 242 ...
Training Loss = 1007.629
Validation Loss = 2720.384
Model Saved
Epoch 243 ...
Training Loss = 878.494
Validation Loss = 2689.061
Model Saved
Epoch 244 ...
Training Loss = 1170.652
Validation Loss = 2759.013
Model Saved
Epoch 245 ...
Training Loss = 1068.284
Validation Loss = 2771.935
Model Saved
Epoch 246 ...
Training Loss = 1130.660
Validation Loss = 2815.965
Model Saved
Epoch 247 ...
Training Loss = 1138.344
Validation Loss = 2741.395
Model Saved
Epoch 248 ...
Training Loss = 1137.947
Validation Loss = 2642.383
Model Saved
Epoch 249 ...
Training Loss = 1078.445
Validation Loss = 2634.283
Model Saved
Epoch 250 ...
Training Loss = 1067.914
Validation Loss = 2665.839
Model Saved
Epoch 251 ...
Training Loss = 1062.285
Validation Loss = 2813.974
Model Saved
Epoch 252 ...
Training Loss = 975.278
Validation Loss = 2735.549
Model Saved
Epoch 253 ...
Training Loss = 1308.931
Validation Loss = 2717.788
Model Saved
Epoch 254 ...
Training Loss = 1113.552
Validation Loss = 2767.801
Model Saved
Epoch 255 ...
Training Loss = 1172.686
Validation Loss = 2734.520
Model Saved
Epoch 256 ...
Training Loss = 1192.904
Validation Loss = 2711.127
Model Saved
Epoch 257 ...
Training Loss = 950.247
Validation Loss = 2717.213
Model Saved
Epoch 258 ...
Training Loss = 1447.467
Validation Loss = 2744.980
Model Saved
Epoch 259 ...
Training Loss = 955.077
Validation Loss = 2614.169
Model Saved
Epoch 260 ...
Training Loss = 991.194
Validation Loss = 2711.412
Model Saved
Epoch 261 ...
Training Loss = 1113.098
Validation Loss = 2637.692
Model Saved
Epoch 262 ...
Training Loss = 1216.160
Validation Loss = 2786.487
Model Saved
Epoch 263 ...
Training Loss = 974.736
Validation Loss = 2640.509
Model Saved
Epoch 264 ...
Training Loss = 949.941
Validation Loss = 2793.490
Model Saved
Epoch 265 ...
Training Loss = 1051.527
Validation Loss = 2587.494
Model Saved
Epoch 266 ...
Training Loss = 1229.108
Validation Loss = 2640.318
Model Saved
Epoch 267 ...
Training Loss = 1016.553
Validation Loss = 2649.496
Model Saved
Epoch 268 ...
Training Loss = 1101.535
Validation Loss = 2690.606
Model Saved
Epoch 269 ...
Training Loss = 976.680
Validation Loss = 2732.384
Model Saved
Epoch 270 ...
Training Loss = 908.524
Validation Loss = 2803.724
Model Saved
Epoch 271 ...
Training Loss = 989.509
Validation Loss = 2740.603
Model Saved
Epoch 272 ...
Training Loss = 896.220
Validation Loss = 2689.896
Model Saved
Epoch 273 ...
Training Loss = 892.581
Validation Loss = 2632.071
Model Saved
Epoch 274 ...
Training Loss = 901.224
Validation Loss = 2737.488
Model Saved
Epoch 275 ...
Training Loss = 914.338
Validation Loss = 2788.966
Model Saved
Epoch 276 ...
Training Loss = 823.686
Validation Loss = 2662.723
Model Saved
Epoch 277 ...
Training Loss = 1014.034
Validation Loss = 2691.787
Model Saved
Epoch 278 ...
Training Loss = 1058.814
Validation Loss = 2680.638
Model Saved
Epoch 279 ...
Training Loss = 1094.032
Validation Loss = 2612.090
Model Saved
Epoch 280 ...
Training Loss = 1372.893
Validation Loss = 2628.270
Model Saved
Epoch 281 ...
Training Loss = 1005.653
Validation Loss = 2746.467
Model Saved
Epoch 282 ...
Training Loss = 1122.838
Validation Loss = 2605.137
Model Saved
Epoch 283 ...
Training Loss = 925.380
Validation Loss = 2647.036
Model Saved
Epoch 284 ...
Training Loss = 966.165
Validation Loss = 2627.793
Model Saved
Epoch 285 ...
Training Loss = 1192.968
Validation Loss = 2781.714
Model Saved
Epoch 286 ...
Training Loss = 1164.975
Validation Loss = 2765.795
Model Saved
Epoch 287 ...
Training Loss = 1359.211
Validation Loss = 2597.382
Model Saved
Epoch 288 ...
Training Loss = 1174.179
Validation Loss = 2573.633
Model Saved
Epoch 289 ...
Training Loss = 848.297
Validation Loss = 2715.538
Model Saved
Epoch 290 ...
Training Loss = 1143.905
Validation Loss = 2657.949
Model Saved
Epoch 291 ...
Training Loss = 789.337
Validation Loss = 2594.376
Model Saved
Epoch 292 ...
Training Loss = 1076.876
Validation Loss = 2725.184
Model Saved
Epoch 293 ...
Training Loss = 856.542
Validation Loss = 2713.056
Model Saved
Epoch 294 ...
Training Loss = 700.839
Validation Loss = 2600.189
Model Saved
Epoch 295 ...
Training Loss = 1204.098
Validation Loss = 2655.484
Model Saved
Epoch 296 ...
Training Loss = 970.141
Validation Loss = 2747.573
Model Saved
Epoch 297 ...
Training Loss = 916.403
Validation Loss = 2596.657
Model Saved
Epoch 298 ...
Training Loss = 957.562
Validation Loss = 2522.700
Model Saved
Epoch 299 ...
Training Loss = 958.170
Validation Loss = 2688.890
Model Saved
Epoch 300 ...
Training Loss = 897.038
Validation Loss = 2632.997
Model Saved
Epoch 301 ...
Training Loss = 821.249
Validation Loss = 2649.486
Model Saved
Epoch 302 ...
Training Loss = 949.023
Validation Loss = 2551.833
Model Saved
Epoch 303 ...
Training Loss = 787.224
Validation Loss = 2629.567
Model Saved
Epoch 304 ...
Training Loss = 1078.783
Validation Loss = 2651.191
Model Saved
Epoch 305 ...
Training Loss = 654.775
Validation Loss = 2644.056
Model Saved
Epoch 306 ...
Training Loss = 815.477
Validation Loss = 2633.546
Model Saved
Epoch 307 ...
Training Loss = 949.969
Validation Loss = 2699.775
Model Saved
Epoch 308 ...
Training Loss = 1037.915
Validation Loss = 2723.127
Model Saved
Epoch 309 ...
Training Loss = 1072.212
Validation Loss = 2566.147
Model Saved
Epoch 310 ...
Training Loss = 750.488
Validation Loss = 2588.440
Model Saved
Epoch 311 ...
Training Loss = 894.711
Validation Loss = 2666.457
Model Saved
Epoch 312 ...
Training Loss = 932.477
Validation Loss = 2606.645
Model Saved
Epoch 313 ...
Training Loss = 1060.571
Validation Loss = 2653.077
Model Saved
Epoch 314 ...
Training Loss = 820.066
Validation Loss = 2603.595
Model Saved
Epoch 315 ...
Training Loss = 1096.974
Validation Loss = 2672.388
Model Saved
Epoch 316 ...
Training Loss = 774.125
Validation Loss = 2608.455
Model Saved
Epoch 317 ...
Training Loss = 841.408
Validation Loss = 2651.636
Model Saved
Epoch 318 ...
Training Loss = 666.998
Validation Loss = 2600.532
Model Saved
Epoch 319 ...
Training Loss = 937.058
Validation Loss = 2669.187
Model Saved
Epoch 320 ...
Training Loss = 979.009
Validation Loss = 2646.455
Model Saved
Epoch 321 ...
Training Loss = 821.287
Validation Loss = 2598.640
Model Saved
Epoch 322 ...
Training Loss = 847.718
Validation Loss = 2620.733
Model Saved
Epoch 323 ...
Training Loss = 989.735
Validation Loss = 2644.259
Model Saved
Epoch 324 ...
Training Loss = 844.618
Validation Loss = 2570.295
Model Saved
Epoch 325 ...
Training Loss = 752.253
Validation Loss = 2591.156
Model Saved
Epoch 326 ...
Training Loss = 915.523
Validation Loss = 2578.604
Model Saved
Epoch 327 ...
Training Loss = 962.434
Validation Loss = 2709.228
Model Saved
Epoch 328 ...
Training Loss = 800.340
Validation Loss = 2599.406
Model Saved
Epoch 329 ...
Training Loss = 945.457
Validation Loss = 2692.573
Model Saved
Epoch 330 ...
Training Loss = 780.365
Validation Loss = 2606.629
Model Saved
Epoch 331 ...
Training Loss = 764.356
Validation Loss = 2669.071
Model Saved
Epoch 332 ...
Training Loss = 628.714
Validation Loss = 2498.017
Model Saved
Epoch 333 ...
Training Loss = 892.288
Validation Loss = 2535.904
Model Saved
Epoch 334 ...
Training Loss = 775.246
Validation Loss = 2628.420
Model Saved
Epoch 335 ...
Training Loss = 676.873
Validation Loss = 2627.227
Model Saved
Epoch 336 ...
Training Loss = 593.837
Validation Loss = 2543.124
Model Saved
Epoch 337 ...
Training Loss = 823.267
Validation Loss = 2578.461
Model Saved
Epoch 338 ...
Training Loss = 852.187
Validation Loss = 2674.380
Model Saved
Epoch 339 ...
Training Loss = 713.664
Validation Loss = 2634.471
Model Saved
Epoch 340 ...
Training Loss = 727.290
Validation Loss = 2616.256
Model Saved
Epoch 341 ...
Training Loss = 893.012
Validation Loss = 2623.770
Model Saved
Epoch 342 ...
Training Loss = 648.062
Validation Loss = 2731.181
Model Saved
Epoch 343 ...
Training Loss = 941.900
Validation Loss = 2558.589
Model Saved
Epoch 344 ...
Training Loss = 739.961
Validation Loss = 2669.221
Model Saved
Epoch 345 ...
Training Loss = 976.685
Validation Loss = 2585.378
Model Saved
Epoch 346 ...
Training Loss = 910.481
Validation Loss = 2664.227
Model Saved
Epoch 347 ...
Training Loss = 825.702
Validation Loss = 2565.410
Model Saved
Epoch 348 ...
Training Loss = 977.099
Validation Loss = 2545.861
Model Saved
Epoch 349 ...
Training Loss = 957.433
Validation Loss = 2725.873
Model Saved
Epoch 350 ...
Training Loss = 847.547
Validation Loss = 2589.290
Model Saved
Epoch 351 ...
Training Loss = 812.632
Validation Loss = 2625.544
Model Saved
Epoch 352 ...
Training Loss = 1041.234
Validation Loss = 2553.041
Model Saved
Epoch 353 ...
Training Loss = 934.707
Validation Loss = 2543.927
Model Saved
Epoch 354 ...
Training Loss = 853.777
Validation Loss = 2570.162
Model Saved
Epoch 355 ...
Training Loss = 764.205
Validation Loss = 2709.707
Model Saved
Epoch 356 ...
Training Loss = 711.052
Validation Loss = 2599.818
Model Saved
Epoch 357 ...
Training Loss = 666.662
Validation Loss = 2586.113
Model Saved
Epoch 358 ...
Training Loss = 891.937
Validation Loss = 2594.915
Model Saved
Epoch 359 ...
Training Loss = 586.875
Validation Loss = 2587.438
Model Saved
Epoch 360 ...
Training Loss = 781.191
Validation Loss = 2665.703
Model Saved
Epoch 361 ...
Training Loss = 697.093
Validation Loss = 2540.121
Model Saved
Epoch 362 ...
Training Loss = 992.675
Validation Loss = 2601.677
Model Saved
Epoch 363 ...
Training Loss = 801.296
Validation Loss = 2524.649
Model Saved
Epoch 364 ...
Training Loss = 691.120
Validation Loss = 2584.078
Model Saved
Epoch 365 ...
Training Loss = 734.536
Validation Loss = 2535.407
Model Saved
Epoch 366 ...
Training Loss = 803.162
Validation Loss = 2567.188
Model Saved
Epoch 367 ...
Training Loss = 944.268
Validation Loss = 2600.136
Model Saved
Epoch 368 ...
Training Loss = 757.046
Validation Loss = 2518.793
Model Saved
Epoch 369 ...
Training Loss = 876.288
Validation Loss = 2568.844
Model Saved
Epoch 370 ...
Training Loss = 630.197
Validation Loss = 2607.880
Model Saved
Epoch 371 ...
Training Loss = 798.755
Validation Loss = 2584.629
Model Saved
Epoch 372 ...
Training Loss = 680.745
Validation Loss = 2652.023
Model Saved
Epoch 373 ...
Training Loss = 738.249
Validation Loss = 2592.930
Model Saved
Epoch 374 ...
Training Loss = 756.365
Validation Loss = 2551.082
Model Saved
Epoch 375 ...
Training Loss = 723.631
Validation Loss = 2557.945
Model Saved
Epoch 376 ...
Training Loss = 666.745
Validation Loss = 2579.541
Model Saved
Epoch 377 ...
Training Loss = 729.841
Validation Loss = 2644.150
Model Saved
Epoch 378 ...
Training Loss = 702.794
Validation Loss = 2479.587
Model Saved
Epoch 379 ...
Training Loss = 667.238
Validation Loss = 2554.499
Model Saved
Epoch 380 ...
Training Loss = 839.272
Validation Loss = 2563.705
Model Saved
Epoch 381 ...
Training Loss = 716.340
Validation Loss = 2565.774
Model Saved
Epoch 382 ...
Training Loss = 872.545
Validation Loss = 2682.020
Model Saved
Epoch 383 ...
Training Loss = 839.578
Validation Loss = 2506.854
Model Saved
Epoch 384 ...
Training Loss = 860.328
Validation Loss = 2601.202
Model Saved
Epoch 385 ...
Training Loss = 772.941
Validation Loss = 2556.042
Model Saved
Epoch 386 ...
Training Loss = 684.657
Validation Loss = 2564.524
Model Saved
Epoch 387 ...
Training Loss = 730.239
Validation Loss = 2523.930
Model Saved
Epoch 388 ...
Training Loss = 876.664
Validation Loss = 2580.032
Model Saved
Epoch 389 ...
Training Loss = 926.500
Validation Loss = 2484.197
Model Saved
Epoch 390 ...
Training Loss = 654.090
Validation Loss = 2547.780
Model Saved
Epoch 391 ...
Training Loss = 682.511
Validation Loss = 2600.940
Model Saved
Epoch 392 ...
Training Loss = 754.045
Validation Loss = 2614.918
Model Saved
Epoch 393 ...
Training Loss = 659.898
Validation Loss = 2526.978
Model Saved
Epoch 394 ...
Training Loss = 781.756
Validation Loss = 2554.325
Model Saved
Epoch 395 ...
Training Loss = 868.982
Validation Loss = 2510.778
Model Saved
Epoch 396 ...
Training Loss = 920.330
Validation Loss = 2494.512
Model Saved
Epoch 397 ...
Training Loss = 755.816
Validation Loss = 2588.492
Model Saved
Epoch 398 ...
Training Loss = 677.880
Validation Loss = 2552.276
Model Saved
Epoch 399 ...
Training Loss = 868.533
Validation Loss = 2484.014
Model Saved
Epoch 400 ...
Training Loss = 775.063
Validation Loss = 2538.833
Model Saved
Epoch 401 ...
Training Loss = 680.896
Validation Loss = 2559.441
Model Saved
Epoch 402 ...
Training Loss = 522.148
Validation Loss = 2595.526
Model Saved
Epoch 403 ...
Training Loss = 759.188
Validation Loss = 2501.288
Model Saved
Epoch 404 ...
Training Loss = 613.621
Validation Loss = 2450.294
Model Saved
Epoch 405 ...
Training Loss = 696.753
Validation Loss = 2528.468
Model Saved
Epoch 406 ...
Training Loss = 929.262
Validation Loss = 2464.970
Model Saved
Epoch 407 ...
Training Loss = 1094.653
Validation Loss = 2635.023
Model Saved
Epoch 408 ...
Training Loss = 712.381
Validation Loss = 2550.472
Model Saved
Epoch 409 ...
Training Loss = 771.690
Validation Loss = 2465.472
Model Saved
Epoch 410 ...
Training Loss = 536.892
Validation Loss = 2550.595
Model Saved
Epoch 411 ...
Training Loss = 525.177
Validation Loss = 2543.172
Model Saved
Epoch 412 ...
Training Loss = 644.000
Validation Loss = 2511.479
Model Saved
Epoch 413 ...
Training Loss = 575.040
Validation Loss = 2523.063
Model Saved
Epoch 414 ...
Training Loss = 800.544
Validation Loss = 2496.814
Model Saved
Epoch 415 ...
Training Loss = 725.621
Validation Loss = 2531.634
Model Saved
Epoch 416 ...
Training Loss = 622.255
Validation Loss = 2542.756
Model Saved
Epoch 417 ...
Training Loss = 769.186
Validation Loss = 2531.791
Model Saved
Epoch 418 ...
Training Loss = 780.411
Validation Loss = 2519.022
Model Saved
Epoch 419 ...
Training Loss = 606.938
Validation Loss = 2526.679
Model Saved
Epoch 420 ...
Training Loss = 657.616
Validation Loss = 2547.851
Model Saved
Epoch 421 ...
Training Loss = 794.893
Validation Loss = 2612.873
Model Saved
Epoch 422 ...
Training Loss = 767.443
Validation Loss = 2504.183
Model Saved
Epoch 423 ...
Training Loss = 515.863
Validation Loss = 2558.531
Model Saved
Epoch 424 ...
Training Loss = 861.109
Validation Loss = 2600.449
Model Saved
Epoch 425 ...
Training Loss = 737.048
Validation Loss = 2468.483
Model Saved
Epoch 426 ...
Training Loss = 744.854
Validation Loss = 2494.103
Model Saved
Epoch 427 ...
Training Loss = 529.372
Validation Loss = 2456.800
Model Saved
Epoch 428 ...
Training Loss = 715.274
Validation Loss = 2462.335
Model Saved
Epoch 429 ...
Training Loss = 718.311
Validation Loss = 2473.415
Model Saved
Epoch 430 ...
Training Loss = 699.321
Validation Loss = 2529.956
Model Saved
Epoch 431 ...
Training Loss = 734.084
Validation Loss = 2535.385
Model Saved
Epoch 432 ...
Training Loss = 669.549
Validation Loss = 2528.143
Model Saved
Epoch 433 ...
Training Loss = 541.115
Validation Loss = 2497.437
Model Saved
Epoch 434 ...
Training Loss = 656.307
Validation Loss = 2505.097
Model Saved
Epoch 435 ...
Training Loss = 458.521
Validation Loss = 2489.072
Model Saved
Epoch 436 ...
Training Loss = 606.446
Validation Loss = 2435.909
Model Saved
Epoch 437 ...
Training Loss = 643.040
Validation Loss = 2525.755
Model Saved
Epoch 438 ...
Training Loss = 724.320
Validation Loss = 2454.782
Model Saved
Epoch 439 ...
Training Loss = 652.480
Validation Loss = 2514.733
Model Saved
Epoch 440 ...
Training Loss = 484.535
Validation Loss = 2537.136
Model Saved
Epoch 441 ...
Training Loss = 661.966
Validation Loss = 2509.985
Model Saved
Epoch 442 ...
Training Loss = 633.487
Validation Loss = 2649.679
Model Saved
Epoch 443 ...
Training Loss = 668.249
Validation Loss = 2573.715
Model Saved
Epoch 444 ...
Training Loss = 653.203
Validation Loss = 2419.130
Model Saved
Epoch 445 ...
Training Loss = 687.233
Validation Loss = 2445.014
Model Saved
Epoch 446 ...
Training Loss = 568.334
Validation Loss = 2517.838
Model Saved
Epoch 447 ...
Training Loss = 961.513
Validation Loss = 2585.037
Model Saved
Epoch 448 ...
Training Loss = 657.203
Validation Loss = 2494.383
Model Saved
Epoch 449 ...
Training Loss = 800.941
Validation Loss = 2478.043
Model Saved
Epoch 450 ...
Training Loss = 681.127
Validation Loss = 2572.894
Model Saved
Epoch 451 ...
Training Loss = 566.725
Validation Loss = 2538.346
Model Saved
Epoch 452 ...
Training Loss = 640.969
Validation Loss = 2485.834
Model Saved
Epoch 453 ...
Training Loss = 630.913
Validation Loss = 2510.227
Model Saved
Epoch 454 ...
Training Loss = 633.539
Validation Loss = 2494.864
Model Saved
Epoch 455 ...
Training Loss = 738.069
Validation Loss = 2459.315
Model Saved
Epoch 456 ...
Training Loss = 673.726
Validation Loss = 2500.025
Model Saved
Epoch 457 ...
Training Loss = 534.014
Validation Loss = 2477.224
Model Saved
Epoch 458 ...
Training Loss = 661.613
Validation Loss = 2466.810
Model Saved
Epoch 459 ...
Training Loss = 589.507
Validation Loss = 2519.254
Model Saved
Epoch 460 ...
Training Loss = 596.883
Validation Loss = 2525.877
Model Saved
Epoch 461 ...
Training Loss = 793.922
Validation Loss = 2481.364
Model Saved
Epoch 462 ...
Training Loss = 684.518
Validation Loss = 2412.695
Model Saved
Epoch 463 ...
Training Loss = 669.925
Validation Loss = 2450.058
Model Saved
Epoch 464 ...
Training Loss = 574.499
Validation Loss = 2440.775
Model Saved
Epoch 465 ...
Training Loss = 626.645
Validation Loss = 2472.687
Model Saved
Epoch 466 ...
Training Loss = 572.440
Validation Loss = 2457.480
Model Saved
Epoch 467 ...
Training Loss = 712.148
Validation Loss = 2454.177
Model Saved
Epoch 468 ...
Training Loss = 555.179
Validation Loss = 2480.287
Model Saved
Epoch 469 ...
Training Loss = 643.909
Validation Loss = 2511.486
Model Saved
Epoch 470 ...
Training Loss = 533.369
Validation Loss = 2420.292
Model Saved
Epoch 471 ...
Training Loss = 783.183
Validation Loss = 2509.983
Model Saved
Epoch 472 ...
Training Loss = 689.726
Validation Loss = 2607.501
Model Saved
Epoch 473 ...
Training Loss = 661.670
Validation Loss = 2450.680
Model Saved
Epoch 474 ...
Training Loss = 668.577
Validation Loss = 2385.178
Model Saved
Epoch 475 ...
Training Loss = 688.285
Validation Loss = 2484.500
Model Saved
Epoch 476 ...
Training Loss = 618.982
Validation Loss = 2465.117
Model Saved
Epoch 477 ...
Training Loss = 650.343
Validation Loss = 2421.262
Model Saved
Epoch 478 ...
Training Loss = 578.851
Validation Loss = 2436.008
Model Saved
Epoch 479 ...
Training Loss = 654.391
Validation Loss = 2435.440
Model Saved
Epoch 480 ...
Training Loss = 593.410
Validation Loss = 2497.379
Model Saved
Epoch 481 ...
Training Loss = 733.293
Validation Loss = 2490.874
Model Saved
Epoch 482 ...
Training Loss = 437.550
Validation Loss = 2442.838
Model Saved
Epoch 483 ...
Training Loss = 541.095
Validation Loss = 2491.339
Model Saved
Epoch 484 ...
Training Loss = 878.034
Validation Loss = 2479.809
Model Saved
Epoch 485 ...
Training Loss = 617.057
Validation Loss = 2435.397
Model Saved
Epoch 486 ...
Training Loss = 614.851
Validation Loss = 2531.243
Model Saved
Epoch 487 ...
Training Loss = 677.872
Validation Loss = 2470.966
Model Saved
Epoch 488 ...
Training Loss = 570.036
Validation Loss = 2535.435
Model Saved
Epoch 489 ...
Training Loss = 674.412
Validation Loss = 2482.111
Model Saved
Epoch 490 ...
Training Loss = 628.654
Validation Loss = 2419.273
Model Saved
Epoch 491 ...
Training Loss = 565.149
Validation Loss = 2491.968
Model Saved
Epoch 492 ...
Training Loss = 727.999
Validation Loss = 2452.819
Model Saved
Epoch 493 ...
Training Loss = 690.479
Validation Loss = 2437.487
Model Saved
Epoch 494 ...
Training Loss = 601.564
Validation Loss = 2484.005
Model Saved
Epoch 495 ...
Training Loss = 584.577
Validation Loss = 2476.727
Model Saved
Epoch 496 ...
Training Loss = 638.447
Validation Loss = 2427.262
Model Saved
Epoch 497 ...
Training Loss = 555.718
Validation Loss = 2428.998
Model Saved
Epoch 498 ...
Training Loss = 649.525
Validation Loss = 2416.409
Model Saved
Epoch 499 ...
Training Loss = 609.201
Validation Loss = 2410.122
Model Saved
Epoch 500 ...
Training Loss = 789.515
Validation Loss = 2473.854
Model Saved
Epoch 501 ...
Training Loss = 603.102
Validation Loss = 2411.175
Model Saved
Epoch 502 ...
Training Loss = 545.927
Validation Loss = 2430.735
Model Saved
Epoch 503 ...
Training Loss = 509.505
Validation Loss = 2431.908
Model Saved
Epoch 504 ...
Training Loss = 616.469
Validation Loss = 2472.932
Model Saved
Epoch 505 ...
Training Loss = 627.780
Validation Loss = 2481.667
Model Saved
Epoch 506 ...
Training Loss = 573.331
Validation Loss = 2456.188
Model Saved
Epoch 507 ...
Training Loss = 559.426
Validation Loss = 2511.428
Model Saved
Epoch 508 ...
Training Loss = 576.495
Validation Loss = 2389.784
Model Saved
Epoch 509 ...
Training Loss = 519.970
Validation Loss = 2462.373
Model Saved
Epoch 510 ...
Training Loss = 659.089
Validation Loss = 2505.487
Model Saved
Epoch 511 ...
Training Loss = 633.967
Validation Loss = 2366.349
Model Saved
Epoch 512 ...
Training Loss = 647.347
Validation Loss = 2429.911
Model Saved
Epoch 513 ...
Training Loss = 535.191
Validation Loss = 2453.672
Model Saved
Epoch 514 ...
Training Loss = 797.062
Validation Loss = 2513.299
Model Saved
Epoch 515 ...
Training Loss = 640.263
Validation Loss = 2449.288
Model Saved
Epoch 516 ...
Training Loss = 505.543
Validation Loss = 2466.769
Model Saved
Epoch 517 ...
Training Loss = 504.123
Validation Loss = 2550.519
Model Saved
Epoch 518 ...
Training Loss = 686.011
Validation Loss = 2476.045
Model Saved
Epoch 519 ...
Training Loss = 684.204
Validation Loss = 2457.234
Model Saved
Epoch 520 ...
Training Loss = 626.673
Validation Loss = 2424.590
Model Saved
Epoch 521 ...
Training Loss = 663.134
Validation Loss = 2463.915
Model Saved
Epoch 522 ...
Training Loss = 594.395
Validation Loss = 2507.199
Model Saved
Epoch 523 ...
Training Loss = 708.244
Validation Loss = 2455.172
Model Saved
Epoch 524 ...
Training Loss = 618.335
Validation Loss = 2540.906
Model Saved
Epoch 525 ...
Training Loss = 721.983
Validation Loss = 2427.744
Model Saved
Epoch 526 ...
Training Loss = 674.797
Validation Loss = 2464.479
Model Saved
Epoch 527 ...
Training Loss = 782.209
Validation Loss = 2436.160
Model Saved
Epoch 528 ...
Training Loss = 532.700
Validation Loss = 2508.509
Model Saved
Epoch 529 ...
Training Loss = 552.086
Validation Loss = 2436.834
Model Saved
Epoch 530 ...
Training Loss = 604.020
Validation Loss = 2480.981
Model Saved
Epoch 531 ...
Training Loss = 585.229
Validation Loss = 2420.986
Model Saved
Epoch 532 ...
Training Loss = 586.396
Validation Loss = 2405.995
Model Saved
Epoch 533 ...
Training Loss = 613.749
Validation Loss = 2489.199
Model Saved
Epoch 534 ...
Training Loss = 814.795
Validation Loss = 2500.507
Model Saved
Epoch 535 ...
Training Loss = 624.255
Validation Loss = 2388.756
Model Saved
Epoch 536 ...
Training Loss = 801.398
Validation Loss = 2495.674
Model Saved
Epoch 537 ...
Training Loss = 579.142
Validation Loss = 2441.056
Model Saved
Epoch 538 ...
Training Loss = 559.100
Validation Loss = 2492.801
Model Saved
Epoch 539 ...
Training Loss = 644.064
Validation Loss = 2428.005
Model Saved
Epoch 540 ...
Training Loss = 573.629
Validation Loss = 2503.706
Model Saved
Epoch 541 ...
Training Loss = 589.658
Validation Loss = 2457.139
Model Saved
Epoch 542 ...
Training Loss = 591.333
Validation Loss = 2526.178
Model Saved
Epoch 543 ...
Training Loss = 607.906
Validation Loss = 2467.372
Model Saved
Epoch 544 ...
Training Loss = 652.206
Validation Loss = 2428.744
Model Saved
Epoch 545 ...
Training Loss = 697.104
Validation Loss = 2410.184
Model Saved
Epoch 546 ...
Training Loss = 433.450
Validation Loss = 2397.673
Model Saved
Epoch 547 ...
Training Loss = 560.455
Validation Loss = 2437.759
Model Saved
Epoch 548 ...
Training Loss = 745.849
Validation Loss = 2416.493
Model Saved
Epoch 549 ...
Training Loss = 598.512
Validation Loss = 2406.289
Model Saved
Epoch 550 ...
Training Loss = 678.552
Validation Loss = 2479.029
Model Saved
Epoch 551 ...
Training Loss = 554.609
Validation Loss = 2584.768
Model Saved
Epoch 552 ...
Training Loss = 663.639
Validation Loss = 2496.048
Model Saved
Epoch 553 ...
Training Loss = 570.150
Validation Loss = 2536.507
Model Saved
Epoch 554 ...
Training Loss = 687.879
Validation Loss = 2534.620
Model Saved
Epoch 555 ...
Training Loss = 622.456
Validation Loss = 2442.124
Model Saved
Epoch 556 ...
Training Loss = 623.061
Validation Loss = 2434.703
Model Saved
Epoch 557 ...
Training Loss = 800.589
Validation Loss = 2511.297
Model Saved
Epoch 558 ...
Training Loss = 475.718
Validation Loss = 2458.240
Model Saved
Epoch 559 ...
Training Loss = 584.419
Validation Loss = 2395.108
Model Saved
Epoch 560 ...
Training Loss = 619.178
Validation Loss = 2473.974
Model Saved
Epoch 561 ...
Training Loss = 606.613
Validation Loss = 2422.972
Model Saved
Epoch 562 ...
Training Loss = 498.481
Validation Loss = 2408.668
Model Saved
Epoch 563 ...
Training Loss = 437.911
Validation Loss = 2438.985
Model Saved
Epoch 564 ...
Training Loss = 628.926
Validation Loss = 2461.860
Model Saved
Epoch 565 ...
Training Loss = 573.119
Validation Loss = 2484.143
Model Saved
Epoch 566 ...
Training Loss = 647.853
Validation Loss = 2446.607
Model Saved
Epoch 567 ...
Training Loss = 738.742
Validation Loss = 2477.234
Model Saved
Epoch 568 ...
Training Loss = 577.769
Validation Loss = 2457.854
Model Saved
Epoch 569 ...
Training Loss = 544.881
Validation Loss = 2435.067
Model Saved
Epoch 570 ...
Training Loss = 592.947
Validation Loss = 2409.381
Model Saved
Epoch 571 ...
Training Loss = 626.263
Validation Loss = 2478.847
Model Saved
Epoch 572 ...
Training Loss = 556.372
Validation Loss = 2439.605
Model Saved
Epoch 573 ...
Training Loss = 564.718
Validation Loss = 2453.229
Model Saved
Epoch 574 ...
Training Loss = 813.352
Validation Loss = 2450.382
Model Saved
Epoch 575 ...
Training Loss = 575.781
Validation Loss = 2448.390
Model Saved
Epoch 576 ...
Training Loss = 586.339
Validation Loss = 2418.787
Model Saved
Epoch 577 ...
Training Loss = 519.363
Validation Loss = 2410.593
Model Saved
Epoch 578 ...
Training Loss = 509.277
Validation Loss = 2410.165
Model Saved
Epoch 579 ...
Training Loss = 618.349
Validation Loss = 2437.027
Model Saved
Epoch 580 ...
Training Loss = 556.404
Validation Loss = 2444.009
Model Saved
Epoch 581 ...
Training Loss = 548.172
Validation Loss = 2439.291
Model Saved
Epoch 582 ...
Training Loss = 446.609
Validation Loss = 2442.106
Model Saved
Epoch 583 ...
Training Loss = 636.857
Validation Loss = 2445.629
Model Saved
Epoch 584 ...
Training Loss = 555.682
Validation Loss = 2414.550
Model Saved
Epoch 585 ...
Training Loss = 917.846
Validation Loss = 2395.263
Model Saved
Epoch 586 ...
Training Loss = 609.121
Validation Loss = 2410.426
Model Saved
Epoch 587 ...
Training Loss = 579.531
Validation Loss = 2454.958
Model Saved
Epoch 588 ...
Training Loss = 606.086
Validation Loss = 2382.702
Model Saved
Epoch 589 ...
Training Loss = 484.001
Validation Loss = 2358.669
Model Saved
Epoch 590 ...
Training Loss = 612.105
Validation Loss = 2432.845
Model Saved
Epoch 591 ...
Training Loss = 631.653
Validation Loss = 2470.458
Model Saved
Epoch 592 ...
Training Loss = 900.065
Validation Loss = 2548.011
Model Saved
Epoch 593 ...
Training Loss = 607.021
Validation Loss = 2453.975
Model Saved
Epoch 594 ...
Training Loss = 528.001
Validation Loss = 2418.768
Model Saved
Epoch 595 ...
Training Loss = 478.996
Validation Loss = 2451.960
Model Saved
Epoch 596 ...
Training Loss = 523.794
Validation Loss = 2428.591
Model Saved
Epoch 597 ...
Training Loss = 519.109
Validation Loss = 2463.616
Model Saved
Epoch 598 ...
Training Loss = 548.705
Validation Loss = 2402.900
Model Saved
Epoch 599 ...
Training Loss = 561.904
Validation Loss = 2435.051
Model Saved
Epoch 600 ...
Training Loss = 581.339
Validation Loss = 2488.183
Model Saved
Epoch 601 ...
Training Loss = 420.670
Validation Loss = 2431.130
Model Saved
Epoch 602 ...
Training Loss = 538.956
Validation Loss = 2459.807
Model Saved
Epoch 603 ...
Training Loss = 488.297
Validation Loss = 2420.815
Model Saved
Epoch 604 ...
Training Loss = 502.132
Validation Loss = 2484.897
Model Saved
Epoch 605 ...
Training Loss = 675.220
Validation Loss = 2446.248
Model Saved
Epoch 606 ...
Training Loss = 496.650
Validation Loss = 2479.054
Model Saved
Epoch 607 ...
Training Loss = 444.358
Validation Loss = 2425.523
Model Saved
Epoch 608 ...
Training Loss = 733.285
Validation Loss = 2444.274
Model Saved
Epoch 609 ...
Training Loss = 460.827
Validation Loss = 2427.444
Model Saved
Epoch 610 ...
Training Loss = 502.525
Validation Loss = 2440.623
Model Saved
Epoch 611 ...
Training Loss = 644.500
Validation Loss = 2386.280
Model Saved
Epoch 612 ...
Training Loss = 552.592
Validation Loss = 2406.026
Model Saved
Epoch 613 ...
Training Loss = 518.485
Validation Loss = 2401.445
Model Saved
Epoch 614 ...
Training Loss = 476.460
Validation Loss = 2420.509
Model Saved
Epoch 615 ...
Training Loss = 664.059
Validation Loss = 2358.427
Model Saved
Epoch 616 ...
Training Loss = 473.329
Validation Loss = 2461.978
Model Saved
Epoch 617 ...
Training Loss = 473.806
Validation Loss = 2444.954
Model Saved
Epoch 618 ...
Training Loss = 664.908
Validation Loss = 2474.066
Model Saved
Epoch 619 ...
Training Loss = 574.330
Validation Loss = 2506.834
Model Saved
Epoch 620 ...
Training Loss = 533.448
Validation Loss = 2429.693
Model Saved
Epoch 621 ...
Training Loss = 717.964
Validation Loss = 2478.504
Model Saved
Epoch 622 ...
Training Loss = 448.906
Validation Loss = 2413.856
Model Saved
Epoch 623 ...
Training Loss = 563.887
Validation Loss = 2489.429
Model Saved
Epoch 624 ...
Training Loss = 594.322
Validation Loss = 2514.783
Model Saved
Epoch 625 ...
Training Loss = 512.952
Validation Loss = 2476.387
Model Saved
Epoch 626 ...
Training Loss = 675.239
Validation Loss = 2381.568
Model Saved
Epoch 627 ...
Training Loss = 516.901
Validation Loss = 2408.023
Model Saved
Epoch 628 ...
Training Loss = 505.342
Validation Loss = 2448.752
Model Saved
Epoch 629 ...
Training Loss = 500.827
Validation Loss = 2453.980
Model Saved
Epoch 630 ...
Training Loss = 565.797
Validation Loss = 2417.361
Model Saved
Epoch 631 ...
Training Loss = 649.298
Validation Loss = 2406.269
Model Saved
Epoch 632 ...
Training Loss = 454.817
Validation Loss = 2441.631
Model Saved
Epoch 633 ...
Training Loss = 583.670
Validation Loss = 2402.523
Model Saved
Epoch 634 ...
Training Loss = 518.509
Validation Loss = 2361.814
Model Saved
Epoch 635 ...
Training Loss = 564.842
Validation Loss = 2448.535
Model Saved
Epoch 636 ...
Training Loss = 499.494
Validation Loss = 2444.495
Model Saved
Epoch 637 ...
Training Loss = 540.799
Validation Loss = 2494.896
Model Saved
Epoch 638 ...
Training Loss = 615.387
Validation Loss = 2462.451
Model Saved
Epoch 639 ...
Training Loss = 529.496
Validation Loss = 2496.320
Model Saved
Epoch 640 ...
Training Loss = 593.024
Validation Loss = 2385.301
Model Saved
Epoch 641 ...
Training Loss = 668.219
Validation Loss = 2349.562
Model Saved
Epoch 642 ...
Training Loss = 546.523
Validation Loss = 2453.615
Model Saved
Epoch 643 ...
Training Loss = 464.046
Validation Loss = 2399.236
Model Saved
Epoch 644 ...
Training Loss = 515.615
Validation Loss = 2430.814
Model Saved
Epoch 645 ...
Training Loss = 468.864
Validation Loss = 2446.881
Model Saved
Epoch 646 ...
Training Loss = 569.768
Validation Loss = 2376.573
Model Saved
Epoch 647 ...
Training Loss = 519.546
Validation Loss = 2441.117
Model Saved
Epoch 648 ...
Training Loss = 539.145
Validation Loss = 2420.957
Model Saved
Epoch 649 ...
Training Loss = 530.479
Validation Loss = 2461.121
Model Saved
Epoch 650 ...
Training Loss = 489.426
Validation Loss = 2415.538
Model Saved
Epoch 651 ...
Training Loss = 566.168
Validation Loss = 2395.747
Model Saved
Epoch 652 ...
Training Loss = 460.623
Validation Loss = 2369.207
Model Saved
Epoch 653 ...
Training Loss = 486.338
Validation Loss = 2367.972
Model Saved
Epoch 654 ...
Training Loss = 538.865
Validation Loss = 2405.225
Model Saved
Epoch 655 ...
Training Loss = 446.737
Validation Loss = 2405.789
Model Saved
Epoch 656 ...
Training Loss = 538.044
Validation Loss = 2447.354
Model Saved
Epoch 657 ...
Training Loss = 606.342
Validation Loss = 2435.101
Model Saved
Epoch 658 ...
Training Loss = 606.949
Validation Loss = 2411.281
Model Saved
Epoch 659 ...
Training Loss = 566.572
Validation Loss = 2448.724
Model Saved
Epoch 660 ...
Training Loss = 512.050
Validation Loss = 2414.191
Model Saved
Epoch 661 ...
Training Loss = 425.362
Validation Loss = 2366.521
Model Saved
Epoch 662 ...
Training Loss = 478.633
Validation Loss = 2371.995
Model Saved
Epoch 663 ...
Training Loss = 567.402
Validation Loss = 2382.433
Model Saved
Epoch 664 ...
Training Loss = 560.196
Validation Loss = 2381.062
Model Saved
Epoch 665 ...
Training Loss = 636.971
Validation Loss = 2437.703
Model Saved
Epoch 666 ...
Training Loss = 513.818
Validation Loss = 2375.263
Model Saved
Epoch 667 ...
Training Loss = 450.629
Validation Loss = 2406.183
Model Saved
Epoch 668 ...
Training Loss = 609.164
Validation Loss = 2354.323
Model Saved
Epoch 669 ...
Training Loss = 490.727
Validation Loss = 2440.182
Model Saved
Epoch 670 ...
Training Loss = 544.306
Validation Loss = 2405.613
Model Saved
Epoch 671 ...
Training Loss = 497.165
Validation Loss = 2400.708
Model Saved
Epoch 672 ...
Training Loss = 560.151
Validation Loss = 2424.295
Model Saved
Epoch 673 ...
Training Loss = 619.409
Validation Loss = 2397.605
Model Saved
Epoch 674 ...
Training Loss = 513.701
Validation Loss = 2417.273
Model Saved
Epoch 675 ...
Training Loss = 550.910
Validation Loss = 2368.324
Model Saved
Epoch 676 ...
Training Loss = 672.546
Validation Loss = 2424.823
Model Saved
Epoch 677 ...
Training Loss = 621.934
Validation Loss = 2379.606
Model Saved
Epoch 678 ...
Training Loss = 473.513
Validation Loss = 2422.356
Model Saved
Epoch 679 ...
Training Loss = 551.652
Validation Loss = 2437.209
Model Saved
Epoch 680 ...
Training Loss = 458.024
Validation Loss = 2407.156
Model Saved
Epoch 681 ...
Training Loss = 554.550
Validation Loss = 2401.166
Model Saved
Epoch 682 ...
Training Loss = 484.403
Validation Loss = 2439.964
Model Saved
Epoch 683 ...
Training Loss = 665.913
Validation Loss = 2465.569
Model Saved
Epoch 684 ...
Training Loss = 519.842
Validation Loss = 2461.958
Model Saved
Epoch 685 ...
Training Loss = 497.622
Validation Loss = 2452.610
Model Saved
Epoch 686 ...
Training Loss = 613.970
Validation Loss = 2414.159
Model Saved
Epoch 687 ...
Training Loss = 468.593
Validation Loss = 2377.614
Model Saved
Epoch 688 ...
Training Loss = 482.703
Validation Loss = 2423.148
Model Saved
Epoch 689 ...
Training Loss = 495.270
Validation Loss = 2375.726
Model Saved
Epoch 690 ...
Training Loss = 530.499
Validation Loss = 2390.387
Model Saved
Epoch 691 ...
Training Loss = 536.091
Validation Loss = 2352.844
Model Saved
Epoch 692 ...
Training Loss = 397.088
Validation Loss = 2417.939
Model Saved
Epoch 693 ...
Training Loss = 512.897
Validation Loss = 2446.358
Model Saved
Epoch 694 ...
Training Loss = 449.078
Validation Loss = 2423.555
Model Saved
Epoch 695 ...
Training Loss = 672.751
Validation Loss = 2442.830
Model Saved
Epoch 696 ...
Training Loss = 471.669
Validation Loss = 2423.254
Model Saved
Epoch 697 ...
Training Loss = 566.529
Validation Loss = 2380.761
Model Saved
Epoch 698 ...
Training Loss = 588.577
Validation Loss = 2381.349
Model Saved
Epoch 699 ...
Training Loss = 592.500
Validation Loss = 2452.097
Model Saved
Epoch 700 ...
Training Loss = 430.858
Validation Loss = 2408.023
Model Saved
Epoch 701 ...
Training Loss = 526.168
Validation Loss = 2388.560
Model Saved
Epoch 702 ...
Training Loss = 644.190
Validation Loss = 2389.206
Model Saved
Epoch 703 ...
Training Loss = 443.048
Validation Loss = 2470.805
Model Saved
Epoch 704 ...
Training Loss = 471.644
Validation Loss = 2417.079
Model Saved
Epoch 705 ...
Training Loss = 613.346
Validation Loss = 2428.866
Model Saved
Epoch 706 ...
Training Loss = 543.755
Validation Loss = 2534.433
Model Saved
Epoch 707 ...
Training Loss = 579.394
Validation Loss = 2364.329
Model Saved
Epoch 708 ...
Training Loss = 552.237
Validation Loss = 2433.835
Model Saved
Epoch 709 ...
Training Loss = 431.011
Validation Loss = 2384.133
Model Saved
Epoch 710 ...
Training Loss = 502.161
Validation Loss = 2412.233
Model Saved
Epoch 711 ...
Training Loss = 430.198
Validation Loss = 2415.683
Model Saved
Epoch 712 ...
Training Loss = 463.874
Validation Loss = 2466.245
Model Saved
Epoch 713 ...
Training Loss = 518.154
Validation Loss = 2455.942
Model Saved
Epoch 714 ...
Training Loss = 593.812
Validation Loss = 2437.276
Model Saved
Epoch 715 ...
Training Loss = 462.995
Validation Loss = 2383.641
Model Saved
Epoch 716 ...
Training Loss = 576.487
Validation Loss = 2435.699
Model Saved
Epoch 717 ...
Training Loss = 448.993
Validation Loss = 2393.666
Model Saved
Epoch 718 ...
Training Loss = 505.239
Validation Loss = 2410.952
Model Saved
Epoch 719 ...
Training Loss = 447.248
Validation Loss = 2417.587
Model Saved
Epoch 720 ...
Training Loss = 522.394
Validation Loss = 2395.560
Model Saved
Epoch 721 ...
Training Loss = 476.843
Validation Loss = 2407.853
Model Saved
Epoch 722 ...
Training Loss = 659.943
Validation Loss = 2358.153
Model Saved
Epoch 723 ...
Training Loss = 616.244
Validation Loss = 2432.632
Model Saved
Epoch 724 ...
Training Loss = 499.713
Validation Loss = 2398.012
Model Saved
Epoch 725 ...
Training Loss = 484.346
Validation Loss = 2430.056
Model Saved
Epoch 726 ...
Training Loss = 476.301
Validation Loss = 2472.987
Model Saved
Epoch 727 ...
Training Loss = 548.568
Validation Loss = 2427.554
Model Saved
Epoch 728 ...
Training Loss = 479.596
Validation Loss = 2359.989
Model Saved
Epoch 729 ...
Training Loss = 579.625
Validation Loss = 2383.394
Model Saved
Epoch 730 ...
Training Loss = 484.855
Validation Loss = 2402.550
Model Saved
Epoch 731 ...
Training Loss = 525.173
Validation Loss = 2473.980
Model Saved
Epoch 732 ...
Training Loss = 457.793
Validation Loss = 2446.023
Model Saved
Epoch 733 ...
Training Loss = 393.683
Validation Loss = 2454.990
Model Saved
Epoch 734 ...
Training Loss = 506.464
Validation Loss = 2412.081
Model Saved
Epoch 735 ...
Training Loss = 510.006
Validation Loss = 2495.034
Model Saved
Epoch 736 ...
Training Loss = 479.943
Validation Loss = 2433.022
Model Saved
Epoch 737 ...
Training Loss = 472.965
Validation Loss = 2404.711
Model Saved
Epoch 738 ...
Training Loss = 455.383
Validation Loss = 2434.719
Model Saved
Epoch 739 ...
Training Loss = 398.968
Validation Loss = 2383.797
Model Saved
Epoch 740 ...
Training Loss = 502.895
Validation Loss = 2398.368
Model Saved
Epoch 741 ...
Training Loss = 490.303
Validation Loss = 2375.445
Model Saved
Epoch 742 ...
Training Loss = 492.860
Validation Loss = 2445.149
Model Saved
Epoch 743 ...
Training Loss = 466.603
Validation Loss = 2401.876
Model Saved
Epoch 744 ...
Training Loss = 596.628
Validation Loss = 2383.932
Model Saved
Epoch 745 ...
Training Loss = 445.410
Validation Loss = 2425.314
Model Saved
Epoch 746 ...
Training Loss = 492.805
Validation Loss = 2353.922
Model Saved
Epoch 747 ...
Training Loss = 492.709
Validation Loss = 2389.755
Model Saved
Epoch 748 ...
Training Loss = 486.470
Validation Loss = 2448.424
Model Saved
Epoch 749 ...
Training Loss = 472.962
Validation Loss = 2363.030
Model Saved
Epoch 750 ...
Training Loss = 507.866
Validation Loss = 2393.025
Model Saved
Epoch 751 ...
Training Loss = 521.083
Validation Loss = 2439.314
Model Saved
Epoch 752 ...
Training Loss = 557.685
Validation Loss = 2397.334
Model Saved
Epoch 753 ...
Training Loss = 515.526
Validation Loss = 2416.916
Model Saved
Epoch 754 ...
Training Loss = 602.968
Validation Loss = 2411.262
Model Saved
Epoch 755 ...
Training Loss = 673.040
Validation Loss = 2429.169
Model Saved
Epoch 756 ...
Training Loss = 429.047
Validation Loss = 2402.190
Model Saved
Epoch 757 ...
Training Loss = 552.452
Validation Loss = 2375.947
Model Saved
Epoch 758 ...
Training Loss = 588.530
Validation Loss = 2385.617
Model Saved
Epoch 759 ...
Training Loss = 447.694
Validation Loss = 2393.151
Model Saved
Epoch 760 ...
Training Loss = 458.626
Validation Loss = 2424.034
Model Saved
Epoch 761 ...
Training Loss = 506.779
Validation Loss = 2373.553
Model Saved
Epoch 762 ...
Training Loss = 493.946
Validation Loss = 2465.943
Model Saved
Epoch 763 ...
Training Loss = 544.076
Validation Loss = 2426.661
Model Saved
Epoch 764 ...
Training Loss = 496.081
Validation Loss = 2428.771
Model Saved
Epoch 765 ...
Training Loss = 505.286
Validation Loss = 2396.331
Model Saved
Epoch 766 ...
Training Loss = 518.783
Validation Loss = 2444.267
Model Saved
Epoch 767 ...
Training Loss = 447.759
Validation Loss = 2399.853
Model Saved
Epoch 768 ...
Training Loss = 519.725
Validation Loss = 2442.304
Model Saved
Epoch 769 ...
Training Loss = 558.445
Validation Loss = 2423.297
Model Saved
Epoch 770 ...
Training Loss = 421.902
Validation Loss = 2391.643
Model Saved
Epoch 771 ...
Training Loss = 417.749
Validation Loss = 2405.849
Model Saved
Epoch 772 ...
Training Loss = 477.399
Validation Loss = 2353.430
Model Saved
Epoch 773 ...
Training Loss = 527.224
Validation Loss = 2403.264
Model Saved
Epoch 774 ...
Training Loss = 643.712
Validation Loss = 2390.622
Model Saved
Epoch 775 ...
Training Loss = 483.760
Validation Loss = 2454.179
Model Saved
Epoch 776 ...
Training Loss = 590.718
Validation Loss = 2396.731
Model Saved
Epoch 777 ...
Training Loss = 435.515
Validation Loss = 2428.835
Model Saved
Epoch 778 ...
Training Loss = 480.587
Validation Loss = 2430.252
Model Saved
Epoch 779 ...
Training Loss = 417.358
Validation Loss = 2388.248
Model Saved
Epoch 780 ...
Training Loss = 496.136
Validation Loss = 2476.151
Model Saved
Epoch 781 ...
Training Loss = 440.625
Validation Loss = 2385.699
Model Saved
Epoch 782 ...
Training Loss = 462.054
Validation Loss = 2302.087
Model Saved
Epoch 783 ...
Training Loss = 462.036
Validation Loss = 2355.286
Model Saved
Epoch 784 ...
Training Loss = 452.461
Validation Loss = 2473.729
Model Saved
Epoch 785 ...
Training Loss = 397.103
Validation Loss = 2463.040
Model Saved
Epoch 786 ...
Training Loss = 439.860
Validation Loss = 2439.589
Model Saved
Epoch 787 ...
Training Loss = 437.392
Validation Loss = 2328.467
Model Saved
Epoch 788 ...
Training Loss = 461.461
Validation Loss = 2411.325
Model Saved
Epoch 789 ...
Training Loss = 487.983
Validation Loss = 2436.652
Model Saved
Epoch 790 ...
Training Loss = 426.271
Validation Loss = 2389.415
Model Saved
Epoch 791 ...
Training Loss = 557.759
Validation Loss = 2401.563
Model Saved
Epoch 792 ...
Training Loss = 571.341
Validation Loss = 2368.693
Model Saved
Epoch 793 ...
Training Loss = 500.245
Validation Loss = 2376.971
Model Saved
Epoch 794 ...
Training Loss = 481.169
Validation Loss = 2395.556
Model Saved
Epoch 795 ...
Training Loss = 506.633
Validation Loss = 2408.696
Model Saved
Epoch 796 ...
Training Loss = 423.419
Validation Loss = 2384.619
Model Saved
Epoch 797 ...
Training Loss = 565.095
Validation Loss = 2428.161
Model Saved
Epoch 798 ...
Training Loss = 476.999
Validation Loss = 2393.512
Model Saved
Epoch 799 ...
Training Loss = 545.649
Validation Loss = 2343.718
Model Saved
Epoch 800 ...
Training Loss = 430.533
Validation Loss = 2384.636
Model Saved
Epoch 801 ...
Training Loss = 432.361
Validation Loss = 2332.598
Model Saved
Epoch 802 ...
Training Loss = 519.693
Validation Loss = 2398.092
Model Saved
Epoch 803 ...
Training Loss = 595.274
Validation Loss = 2355.641
Model Saved
Epoch 804 ...
Training Loss = 507.536
Validation Loss = 2367.197
Model Saved
Epoch 805 ...
Training Loss = 521.604
Validation Loss = 2394.402
Model Saved
Epoch 806 ...
Training Loss = 477.441
Validation Loss = 2340.783
Model Saved
Epoch 807 ...
Training Loss = 527.962
Validation Loss = 2401.724
Model Saved
Epoch 808 ...
Training Loss = 455.288
Validation Loss = 2375.995
Model Saved
Epoch 809 ...
Training Loss = 387.116
Validation Loss = 2429.453
Model Saved
Epoch 810 ...
Training Loss = 459.473
Validation Loss = 2395.304
Model Saved
Epoch 811 ...
Training Loss = 426.798
Validation Loss = 2388.429
Model Saved
Epoch 812 ...
Training Loss = 552.286
Validation Loss = 2402.428
Model Saved
Epoch 813 ...
Training Loss = 430.489
Validation Loss = 2379.142
Model Saved
Epoch 814 ...
Training Loss = 435.473
Validation Loss = 2385.819
Model Saved
Epoch 815 ...
Training Loss = 489.424
Validation Loss = 2375.228
Model Saved
Epoch 816 ...
Training Loss = 451.520
Validation Loss = 2349.053
Model Saved
Epoch 817 ...
Training Loss = 428.457
Validation Loss = 2367.094
Model Saved
Epoch 818 ...
Training Loss = 387.072
Validation Loss = 2444.620
Model Saved
Epoch 819 ...
Training Loss = 637.701
Validation Loss = 2417.220
Model Saved
Epoch 820 ...
Training Loss = 435.503
Validation Loss = 2337.948
Model Saved
Epoch 821 ...
Training Loss = 505.909
Validation Loss = 2362.935
Model Saved
Epoch 822 ...
Training Loss = 437.232
Validation Loss = 2408.990
Model Saved
Epoch 823 ...
Training Loss = 590.644
Validation Loss = 2428.208
Model Saved
Epoch 824 ...
Training Loss = 389.696
Validation Loss = 2421.568
Model Saved
Epoch 825 ...
Training Loss = 546.996
Validation Loss = 2385.222
Model Saved
Epoch 826 ...
Training Loss = 514.743
Validation Loss = 2366.524
Model Saved
Epoch 827 ...
Training Loss = 399.026
Validation Loss = 2418.365
Model Saved
Epoch 828 ...
Training Loss = 444.529
Validation Loss = 2423.619
Model Saved
Epoch 829 ...
Training Loss = 505.404
Validation Loss = 2326.864
Model Saved
Epoch 830 ...
Training Loss = 544.107
Validation Loss = 2386.330
Model Saved
Epoch 831 ...
Training Loss = 481.471
Validation Loss = 2388.829
Model Saved
Epoch 832 ...
Training Loss = 472.612
Validation Loss = 2390.979
Model Saved
Epoch 833 ...
Training Loss = 440.309
Validation Loss = 2385.644
Model Saved
Epoch 834 ...
Training Loss = 495.881
Validation Loss = 2387.494
Model Saved
Epoch 835 ...
Training Loss = 432.354
Validation Loss = 2409.367
Model Saved
Epoch 836 ...
Training Loss = 406.539
Validation Loss = 2335.667
Model Saved
Epoch 837 ...
Training Loss = 588.533
Validation Loss = 2391.059
Model Saved
Epoch 838 ...
Training Loss = 453.989
Validation Loss = 2369.379
Model Saved
Epoch 839 ...
Training Loss = 487.922
Validation Loss = 2372.226
Model Saved
Epoch 840 ...
Training Loss = 350.842
Validation Loss = 2372.071
Model Saved
Epoch 841 ...
Training Loss = 449.766
Validation Loss = 2402.760
Model Saved
Epoch 842 ...
Training Loss = 671.921
Validation Loss = 2362.511
Model Saved
Epoch 843 ...
Training Loss = 428.307
Validation Loss = 2390.781
Model Saved
Epoch 844 ...
Training Loss = 436.148
Validation Loss = 2461.192
Model Saved
Epoch 845 ...
Training Loss = 484.350
Validation Loss = 2411.870
Model Saved
Epoch 846 ...
Training Loss = 511.283
Validation Loss = 2352.851
Model Saved
Epoch 847 ...
Training Loss = 467.333
Validation Loss = 2359.836
Model Saved
Epoch 848 ...
Training Loss = 434.171
Validation Loss = 2318.535
Model Saved
Epoch 849 ...
Training Loss = 544.394
Validation Loss = 2395.194
Model Saved
Epoch 850 ...
Training Loss = 439.114
Validation Loss = 2362.873
Model Saved
Epoch 851 ...
Training Loss = 430.311
Validation Loss = 2317.488
Model Saved
Epoch 852 ...
Training Loss = 478.658
Validation Loss = 2368.410
Model Saved
Epoch 853 ...
Training Loss = 398.763
Validation Loss = 2372.625
Model Saved
Epoch 854 ...
Training Loss = 520.658
Validation Loss = 2390.597
Model Saved
Epoch 855 ...
Training Loss = 564.872
Validation Loss = 2400.466
Model Saved
Epoch 856 ...
Training Loss = 418.920
Validation Loss = 2390.255
Model Saved
Epoch 857 ...
Training Loss = 407.921
Validation Loss = 2418.385
Model Saved
Epoch 858 ...
Training Loss = 517.398
Validation Loss = 2385.407
Model Saved
Epoch 859 ...
Training Loss = 458.643
Validation Loss = 2379.975
Model Saved
Epoch 860 ...
Training Loss = 368.927
Validation Loss = 2335.952
Model Saved
Epoch 861 ...
Training Loss = 450.912
Validation Loss = 2383.816
Model Saved
Epoch 862 ...
Training Loss = 361.283
Validation Loss = 2363.724
Model Saved
Epoch 863 ...
Training Loss = 469.107
Validation Loss = 2387.637
Model Saved
Epoch 864 ...
Training Loss = 445.715
Validation Loss = 2359.166
Model Saved
Epoch 865 ...
Training Loss = 528.204
Validation Loss = 2421.516
Model Saved
Epoch 866 ...
Training Loss = 504.052
Validation Loss = 2418.280
Model Saved
Epoch 867 ...
Training Loss = 362.726
Validation Loss = 2397.351
Model Saved
Epoch 868 ...
Training Loss = 475.872
Validation Loss = 2399.336
Model Saved
Epoch 869 ...
Training Loss = 408.093
Validation Loss = 2370.576
Model Saved
Epoch 870 ...
Training Loss = 493.951
Validation Loss = 2339.790
Model Saved
Epoch 871 ...
Training Loss = 435.426
Validation Loss = 2389.690
Model Saved
Epoch 872 ...
Training Loss = 463.328
Validation Loss = 2393.888
Model Saved
Epoch 873 ...
Training Loss = 478.333
Validation Loss = 2354.486
Model Saved
Epoch 874 ...
Training Loss = 485.567
Validation Loss = 2357.593
Model Saved
Epoch 875 ...
Training Loss = 394.623
Validation Loss = 2352.865
Model Saved
Epoch 876 ...
Training Loss = 442.649
Validation Loss = 2386.095
Model Saved
Epoch 877 ...
Training Loss = 478.620
Validation Loss = 2320.822
Model Saved
Epoch 878 ...
Training Loss = 521.598
Validation Loss = 2394.605
Model Saved
Epoch 879 ...
Training Loss = 566.516
Validation Loss = 2369.190
Model Saved
Epoch 880 ...
Training Loss = 517.530
Validation Loss = 2426.114
Model Saved
Epoch 881 ...
Training Loss = 376.308
Validation Loss = 2389.047
Model Saved
Epoch 882 ...
Training Loss = 430.850
Validation Loss = 2387.452
Model Saved
Epoch 883 ...
Training Loss = 509.846
Validation Loss = 2377.522
Model Saved
Epoch 884 ...
Training Loss = 475.341
Validation Loss = 2407.821
Model Saved
Epoch 885 ...
Training Loss = 514.355
Validation Loss = 2476.500
Model Saved
Epoch 886 ...
Training Loss = 441.322
Validation Loss = 2408.686
Model Saved
Epoch 887 ...
Training Loss = 453.012
Validation Loss = 2382.715
Model Saved
Epoch 888 ...
Training Loss = 390.197
Validation Loss = 2439.918
Model Saved
Epoch 889 ...
Training Loss = 427.625
Validation Loss = 2423.094
Model Saved
Epoch 890 ...
Training Loss = 441.650
Validation Loss = 2414.042
Model Saved
Epoch 891 ...
Training Loss = 612.543
Validation Loss = 2392.463
Model Saved
Epoch 892 ...
Training Loss = 472.882
Validation Loss = 2435.288
Model Saved
Epoch 893 ...
Training Loss = 423.612
Validation Loss = 2408.342
Model Saved
Epoch 894 ...
Training Loss = 467.178
Validation Loss = 2468.025
Model Saved
Epoch 895 ...
Training Loss = 399.705
Validation Loss = 2380.130
Model Saved
Epoch 896 ...
Training Loss = 394.574
Validation Loss = 2406.646
Model Saved
Epoch 897 ...
Training Loss = 414.471
Validation Loss = 2407.793
Model Saved
Epoch 898 ...
Training Loss = 461.175
Validation Loss = 2408.375
Model Saved
Epoch 899 ...
Training Loss = 445.283
Validation Loss = 2332.729
Model Saved
Epoch 900 ...
Training Loss = 514.393
Validation Loss = 2373.465
Model Saved
Epoch 901 ...
Training Loss = 464.827
Validation Loss = 2427.386
Model Saved
Epoch 902 ...
Training Loss = 500.040
Validation Loss = 2445.763
Model Saved
Epoch 903 ...
Training Loss = 778.211
Validation Loss = 2376.972
Model Saved
Epoch 904 ...
Training Loss = 459.008
Validation Loss = 2377.731
Model Saved
Epoch 905 ...
Training Loss = 585.716
Validation Loss = 2398.438
Model Saved
Epoch 906 ...
Training Loss = 428.206
Validation Loss = 2393.939
Model Saved
Epoch 907 ...
Training Loss = 484.672
Validation Loss = 2382.665
Model Saved
Epoch 908 ...
Training Loss = 487.658
Validation Loss = 2346.436
Model Saved
Epoch 909 ...
Training Loss = 551.703
Validation Loss = 2390.815
Model Saved
Epoch 910 ...
Training Loss = 413.755
Validation Loss = 2344.684
Model Saved
Epoch 911 ...
Training Loss = 458.889
Validation Loss = 2382.303
Model Saved
Epoch 912 ...
Training Loss = 350.066
Validation Loss = 2382.093
Model Saved
Epoch 913 ...
Training Loss = 432.566
Validation Loss = 2384.289
Model Saved
Epoch 914 ...
Training Loss = 401.695
Validation Loss = 2355.328
Model Saved
Epoch 915 ...
Training Loss = 502.271
Validation Loss = 2387.575
Model Saved
Epoch 916 ...
Training Loss = 405.504
Validation Loss = 2359.578
Model Saved
Epoch 917 ...
Training Loss = 388.123
Validation Loss = 2338.778
Model Saved
Epoch 918 ...
Training Loss = 408.331
Validation Loss = 2406.931
Model Saved
Epoch 919 ...
Training Loss = 491.736
Validation Loss = 2424.633
Model Saved
Epoch 920 ...
Training Loss = 447.394
Validation Loss = 2370.630
Model Saved
Epoch 921 ...
Training Loss = 407.878
Validation Loss = 2354.583
Model Saved
Epoch 922 ...
Training Loss = 505.552
Validation Loss = 2308.475
Model Saved
Epoch 923 ...
Training Loss = 365.611
Validation Loss = 2355.280
Model Saved
Epoch 924 ...
Training Loss = 458.368
Validation Loss = 2378.852
Model Saved
Epoch 925 ...
Training Loss = 432.800
Validation Loss = 2390.130
Model Saved
Epoch 926 ...
Training Loss = 436.520
Validation Loss = 2364.621
Model Saved
Epoch 927 ...
Training Loss = 507.791
Validation Loss = 2371.684
Model Saved
Epoch 928 ...
Training Loss = 510.901
Validation Loss = 2420.969
Model Saved
Epoch 929 ...
Training Loss = 529.591
Validation Loss = 2380.461
Model Saved
Epoch 930 ...
Training Loss = 402.862
Validation Loss = 2369.705
Model Saved
Epoch 931 ...
Training Loss = 465.128
Validation Loss = 2391.592
Model Saved
Epoch 932 ...
Training Loss = 363.344
Validation Loss = 2368.185
Model Saved
Epoch 933 ...
Training Loss = 451.735
Validation Loss = 2376.489
Model Saved
Epoch 934 ...
Training Loss = 431.829
Validation Loss = 2379.574
Model Saved
Epoch 935 ...
Training Loss = 392.629
Validation Loss = 2341.931
Model Saved
Epoch 936 ...
Training Loss = 452.829
Validation Loss = 2399.913
Model Saved
Epoch 937 ...
Training Loss = 391.189
Validation Loss = 2381.289
Model Saved
Epoch 938 ...
Training Loss = 393.740
Validation Loss = 2364.091
Model Saved
Epoch 939 ...
Training Loss = 518.620
Validation Loss = 2365.340
Model Saved
Epoch 940 ...
Training Loss = 402.691
Validation Loss = 2338.122
Model Saved
Epoch 941 ...
Training Loss = 351.909
Validation Loss = 2350.104
Model Saved
Epoch 942 ...
Training Loss = 480.771
Validation Loss = 2330.314
Model Saved
Epoch 943 ...
Training Loss = 456.292
Validation Loss = 2350.184
Model Saved
Epoch 944 ...
Training Loss = 362.265
Validation Loss = 2354.665
Model Saved
Epoch 945 ...
Training Loss = 526.918
Validation Loss = 2381.735
Model Saved
Epoch 946 ...
Training Loss = 414.643
Validation Loss = 2354.238
Model Saved
Epoch 947 ...
Training Loss = 437.010
Validation Loss = 2453.109
Model Saved
Epoch 948 ...
Training Loss = 474.715
Validation Loss = 2330.943
Model Saved
Epoch 949 ...
Training Loss = 442.407
Validation Loss = 2364.073
Model Saved
Epoch 950 ...
Training Loss = 474.463
Validation Loss = 2340.710
Model Saved
Epoch 951 ...
Training Loss = 482.533
Validation Loss = 2355.846
Model Saved
Epoch 952 ...
Training Loss = 418.635
Validation Loss = 2392.016
Model Saved
Epoch 953 ...
Training Loss = 418.802
Validation Loss = 2310.434
Model Saved
Epoch 954 ...
Training Loss = 496.406
Validation Loss = 2350.295
Model Saved
Epoch 955 ...
Training Loss = 385.309
Validation Loss = 2414.760
Model Saved
Epoch 956 ...
Training Loss = 559.607
Validation Loss = 2370.438
Model Saved
Epoch 957 ...
Training Loss = 383.889
Validation Loss = 2342.993
Model Saved
Epoch 958 ...
Training Loss = 429.101
Validation Loss = 2354.719
Model Saved
Epoch 959 ...
Training Loss = 538.372
Validation Loss = 2382.075
Model Saved
Epoch 960 ...
Training Loss = 454.437
Validation Loss = 2387.320
Model Saved
Epoch 961 ...
Training Loss = 483.543
Validation Loss = 2343.838
Model Saved
Epoch 962 ...
Training Loss = 389.849
Validation Loss = 2343.115
Model Saved
Epoch 963 ...
Training Loss = 438.378
Validation Loss = 2402.778
Model Saved
Epoch 964 ...
Training Loss = 556.717
Validation Loss = 2381.932
Model Saved
Epoch 965 ...
Training Loss = 412.634
Validation Loss = 2380.422
Model Saved
Epoch 966 ...
Training Loss = 452.756
Validation Loss = 2383.450
Model Saved
Epoch 967 ...
Training Loss = 533.130
Validation Loss = 2347.720
Model Saved
Epoch 968 ...
Training Loss = 484.596
Validation Loss = 2346.102
Model Saved
Epoch 969 ...
Training Loss = 406.432
Validation Loss = 2397.308
Model Saved
Epoch 970 ...
Training Loss = 573.159
Validation Loss = 2387.088
Model Saved
Epoch 971 ...
Training Loss = 440.417
Validation Loss = 2346.896
Model Saved
Epoch 972 ...
Training Loss = 402.111
Validation Loss = 2330.395
Model Saved
Epoch 973 ...
Training Loss = 450.311
Validation Loss = 2306.175
Model Saved
Epoch 974 ...
Training Loss = 485.903
Validation Loss = 2372.568
Model Saved
Epoch 975 ...
Training Loss = 455.564
Validation Loss = 2411.334
Model Saved
Epoch 976 ...
Training Loss = 378.713
Validation Loss = 2357.247
Model Saved
Epoch 977 ...
Training Loss = 430.180
Validation Loss = 2381.575
Model Saved
Epoch 978 ...
Training Loss = 467.904
Validation Loss = 2399.050
Model Saved
Epoch 979 ...
Training Loss = 408.136
Validation Loss = 2294.871
Model Saved
Epoch 980 ...
Training Loss = 457.842
Validation Loss = 2367.178
Model Saved
Epoch 981 ...
Training Loss = 429.432
Validation Loss = 2323.846
Model Saved
Epoch 982 ...
Training Loss = 495.759
Validation Loss = 2396.870
Model Saved
Epoch 983 ...
Training Loss = 446.198
Validation Loss = 2360.419
Model Saved
Epoch 984 ...
Training Loss = 538.200
Validation Loss = 2412.724
Model Saved
Epoch 985 ...
Training Loss = 393.407
Validation Loss = 2328.064
Model Saved
Epoch 986 ...
Training Loss = 458.183
Validation Loss = 2409.507
Model Saved
Epoch 987 ...
Training Loss = 529.629
Validation Loss = 2374.357
Model Saved
Epoch 988 ...
Training Loss = 326.045
Validation Loss = 2381.152
Model Saved
Epoch 989 ...
Training Loss = 501.501
Validation Loss = 2384.604
Model Saved
Epoch 990 ...
Training Loss = 427.635
Validation Loss = 2378.882
Model Saved
Epoch 991 ...
Training Loss = 469.297
Validation Loss = 2287.692
Model Saved
Epoch 992 ...
Training Loss = 424.408
Validation Loss = 2392.241
Model Saved
Epoch 993 ...
Training Loss = 361.036
Validation Loss = 2336.752
Model Saved
Epoch 994 ...
Training Loss = 368.652
Validation Loss = 2391.430
Model Saved
Epoch 995 ...
Training Loss = 449.762
Validation Loss = 2348.417
Model Saved
Epoch 996 ...
Training Loss = 420.561
Validation Loss = 2402.764
Model Saved
Epoch 997 ...
Training Loss = 534.289
Validation Loss = 2345.730
Model Saved
Epoch 998 ...
Training Loss = 384.504
Validation Loss = 2334.007
Model Saved
Epoch 999 ...
Training Loss = 453.283
Validation Loss = 2408.305
Model Saved
Epoch 1000 ...
Training Loss = 397.362
Validation Loss = 2346.886
Model Saved
In [49]:
# TRAIN 
# Plot Training Loss
plt.plot(training_loss_array)
plt.title('Training Loss')
Out[49]:
Text(0.5,1,'Training Loss')
In [48]:
# TRAIN 
# Plot Training Loss
plt.plot(validation_loss_array)
plt.title('Training Loss')
Out[48]:
Text(0.5,1,'Training Loss')

Evaluate

In [22]:
# TEST
# Define a Function to Convert Label Image to Binary Image
def threshold_labels(img, thresh):
    # Create Binary Image
    binary_img = np.zeros((img.shape[0], img.shape[1]))
    # Create For Loops for Check ground Pixels
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            if img[i,j] > thresh:
                binary_img[i,j] = 255
            else:
                binary_img[i,j] = 0
    return binary_img
In [36]:
# Evaluate the Model on Random Images
import time
# Use Session to Infer
with tf.Session() as sess:
    saver.restore(sess,'Model-Tensorflow/unet')
    for i in range(200,300):
        # Predict
        original_image = x_valid[i]
        img = x_valid[i]
        label_image = y_valid[i]
        start_time = time.time()
        original_image = np.reshape(original_image, [-1, 144, 144, 3])
        test_data = {x:original_image}
        test_mask = sess.run([y_pred],feed_dict=test_data)
        test_mask = np.reshape(test_mask, (144,144))
        end_time = time.time()
        print('Computation Time:', end_time - start_time)
        binary_image = threshold_labels(test_mask, 150)
        
        # Resize Image
        #img = cv2.resize(img, (1024, 2048))
        #label_image = cv2.resize(label_image, (1024, 2048))
        #binary_image = cv2.resize(binary_image, (1024, 2048))
    
        # Plot
        fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize = (15,15))
        axis1.imshow(img)
        axis1.set_title('Input Image')
        axis2.imshow(label_image[:,:,0])
        axis2.set_title('Label')
        axis3.imshow(binary_image)
        axis3.set_title('Binary Prediction')
INFO:tensorflow:Restoring parameters from Model-Tensorflow/unet
Computation Time: 0.08917760848999023
Computation Time: 0.0036525726318359375
Computation Time: 0.003587007522583008
Computation Time: 0.0036253929138183594
Computation Time: 0.0035381317138671875
Computation Time: 0.0036668777465820312
Computation Time: 0.00481414794921875
Computation Time: 0.003613710403442383
Computation Time: 0.003474712371826172
Computation Time: 0.0036284923553466797
Computation Time: 0.003959178924560547
Computation Time: 0.0039098262786865234
Computation Time: 0.0034873485565185547
Computation Time: 0.0035698413848876953
Computation Time: 0.0034623146057128906
Computation Time: 0.003651142120361328
Computation Time: 0.003858804702758789
Computation Time: 0.0035648345947265625
Computation Time: 0.0034308433532714844
Computation Time: 0.0035216808319091797
Computation Time: 0.0036911964416503906
Computation Time: 0.003568887710571289
Computation Time: 0.0036339759826660156
Computation Time: 0.0035715103149414062
Computation Time: 0.0035724639892578125
/home/avidbots/.local/lib/python3.5/site-packages/matplotlib/pyplot.py:537: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Computation Time: 0.0035352706909179688
Computation Time: 0.003585338592529297
Computation Time: 0.0035560131072998047
Computation Time: 0.003599405288696289
Computation Time: 0.003761768341064453
Computation Time: 0.003515958786010742
Computation Time: 0.0034782886505126953
Computation Time: 0.003534078598022461
Computation Time: 0.0035142898559570312
Computation Time: 0.0036096572875976562
Computation Time: 0.0036160945892333984
Computation Time: 0.003614664077758789
Computation Time: 0.003674745559692383
Computation Time: 0.0035719871520996094
Computation Time: 0.003542184829711914
Computation Time: 0.003614664077758789
Computation Time: 0.003590822219848633
Computation Time: 0.0035979747772216797
Computation Time: 0.0034966468811035156
Computation Time: 0.0035462379455566406
Computation Time: 0.003573894500732422
Computation Time: 0.0035829544067382812
Computation Time: 0.003520488739013672
Computation Time: 0.003517627716064453
Computation Time: 0.0035064220428466797
Computation Time: 0.004522085189819336
Computation Time: 0.0035505294799804688
Computation Time: 0.0035715103149414062
Computation Time: 0.004250049591064453
Computation Time: 0.003477811813354492
Computation Time: 0.0035963058471679688
Computation Time: 0.0037169456481933594
Computation Time: 0.003609895706176758
Computation Time: 0.003535747528076172
Computation Time: 0.0035276412963867188
Computation Time: 0.003781557083129883
Computation Time: 0.0035157203674316406
Computation Time: 0.0035560131072998047
Computation Time: 0.0034723281860351562
Computation Time: 0.0034551620483398438
Computation Time: 0.0035545825958251953
Computation Time: 0.003587961196899414
Computation Time: 0.003625631332397461
Computation Time: 0.0036690235137939453
Computation Time: 0.003570556640625
Computation Time: 0.0034465789794921875
Computation Time: 0.0035860538482666016
Computation Time: 0.003668069839477539
Computation Time: 0.003630399703979492
Computation Time: 0.0034978389739990234
Computation Time: 0.0035064220428466797
Computation Time: 0.003420114517211914
Computation Time: 0.0036988258361816406
Computation Time: 0.003664255142211914
Computation Time: 0.0034780502319335938
Computation Time: 0.0034346580505371094
Computation Time: 0.003440380096435547
Computation Time: 0.003535747528076172
Computation Time: 0.003607034683227539
Computation Time: 0.003481149673461914
Computation Time: 0.003406524658203125
Computation Time: 0.0033707618713378906
Computation Time: 0.0036613941192626953
Computation Time: 0.0036504268646240234
Computation Time: 0.00348663330078125
Computation Time: 0.003416776657104492
Computation Time: 0.003432035446166992
Computation Time: 0.003537416458129883
Computation Time: 0.0035881996154785156
Computation Time: 0.0036215782165527344
Computation Time: 0.0035233497619628906
Computation Time: 0.0035500526428222656
Computation Time: 0.003671407699584961
Computation Time: 0.003715991973876953
Computation Time: 0.0035212039947509766
In [40]:
# Evaluate the Model on Random Images
import time
# Use Session to Infer
with tf.Session() as sess:
    saver.restore(sess,'Model-Tensorflow/unet')
    for i in range(200,300):
        # Predict
        original_image = x_valid[i]
        img = x_valid[i]
        label_image = y_valid[i]
        start_time = time.time()
        original_image = np.reshape(original_image, [-1, 144, 144, 3])
        test_data = {x:original_image}
        test_mask = sess.run([y_pred],feed_dict=test_data)
        test_mask = np.reshape(test_mask, (144,144))
        end_time = time.time()
        print('Computation Time:', end_time - start_time)
        binary_image = threshold_labels(test_mask, 150)
        
        # Resize Image
        #img = cv2.resize(img, (1024, 2048))
        #label_image = cv2.resize(label_image, (1024, 2048))
        #binary_image = cv2.resize(binary_image, (1024, 2048))
    
        # Plot
        fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize = (15,15))
        axis1.imshow(img)
        axis1.set_title('Input Image')
        axis2.imshow(label_image[:,:,0])
        axis2.set_title('Label')
        axis3.imshow(binary_image)
        axis3.set_title('Binary Prediction')
INFO:tensorflow:Restoring parameters from Model-Tensorflow/unet
Computation Time: 0.08877992630004883
Computation Time: 0.003659486770629883
Computation Time: 0.0035893917083740234
Computation Time: 0.003603696823120117
Computation Time: 0.003512144088745117
Computation Time: 0.003521442413330078
Computation Time: 0.0036144256591796875
Computation Time: 0.003566741943359375
Computation Time: 0.0035114288330078125
Computation Time: 0.0033981800079345703
Computation Time: 0.0034859180450439453
Computation Time: 0.0035898685455322266
Computation Time: 0.0037779808044433594
Computation Time: 0.003695964813232422
Computation Time: 0.0036742687225341797
Computation Time: 0.0035355091094970703
Computation Time: 0.0035791397094726562
Computation Time: 0.003661632537841797
Computation Time: 0.0036270618438720703
Computation Time: 0.0034575462341308594
Computation Time: 0.0036554336547851562
Computation Time: 0.0035278797149658203
/home/avidbots/.local/lib/python3.5/site-packages/matplotlib/pyplot.py:537: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Computation Time: 0.003724813461303711
Computation Time: 0.003562450408935547
Computation Time: 0.003580808639526367
Computation Time: 0.0035185813903808594
Computation Time: 0.0038673877716064453
Computation Time: 0.00362396240234375
Computation Time: 0.003626108169555664
Computation Time: 0.0036699771881103516
Computation Time: 0.0035200119018554688
Computation Time: 0.0034379959106445312
Computation Time: 0.0035505294799804688
Computation Time: 0.0035965442657470703
Computation Time: 0.003432035446166992
Computation Time: 0.0036232471466064453
Computation Time: 0.0036089420318603516
Computation Time: 0.003620147705078125
Computation Time: 0.0036454200744628906
Computation Time: 0.003629922866821289
Computation Time: 0.0035827159881591797
Computation Time: 0.0037698745727539062
Computation Time: 0.003659963607788086
Computation Time: 0.0036149024963378906
Computation Time: 0.0035676956176757812
Computation Time: 0.0034770965576171875
Computation Time: 0.0035991668701171875
Computation Time: 0.003585338592529297
Computation Time: 0.0035333633422851562
Computation Time: 0.003481626510620117
Computation Time: 0.0036122798919677734
Computation Time: 0.0036401748657226562
Computation Time: 0.003660440444946289
Computation Time: 0.003545045852661133
Computation Time: 0.003547191619873047
Computation Time: 0.003509998321533203
Computation Time: 0.0036008358001708984
Computation Time: 0.003691434860229492
Computation Time: 0.0036096572875976562
Computation Time: 0.003564119338989258
Computation Time: 0.003582000732421875
Computation Time: 0.0036559104919433594
Computation Time: 0.00376129150390625
Computation Time: 0.003617525100708008
Computation Time: 0.003574848175048828
Computation Time: 0.0034127235412597656
Computation Time: 0.0036783218383789062
Computation Time: 0.003741025924682617
Computation Time: 0.0035448074340820312
Computation Time: 0.003484964370727539
Computation Time: 0.003524303436279297
Computation Time: 0.0035254955291748047
Computation Time: 0.0035729408264160156
Computation Time: 0.003506898880004883
Computation Time: 0.0035905838012695312
Computation Time: 0.003632783889770508
Computation Time: 0.0036432743072509766
Computation Time: 0.003720998764038086
Computation Time: 0.0035369396209716797
Computation Time: 0.0035300254821777344
Computation Time: 0.003489971160888672
Computation Time: 0.0036208629608154297
Computation Time: 0.0036389827728271484
Computation Time: 0.003360271453857422
Computation Time: 0.003340482711791992
Computation Time: 0.0033550262451171875
Computation Time: 0.003590822219848633
Computation Time: 0.003456592559814453
Computation Time: 0.0034852027893066406
Computation Time: 0.0036194324493408203
Computation Time: 0.0035071372985839844
Computation Time: 0.003586292266845703
Computation Time: 0.003570556640625
Computation Time: 0.0034432411193847656
Computation Time: 0.00347900390625
Computation Time: 0.003498077392578125
Computation Time: 0.0035555362701416016
Computation Time: 0.0036690235137939453
Computation Time: 0.0035097599029541016
Computation Time: 0.0034999847412109375
In [ ]:
# TEST
# Import Modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from tensorflow.python.framework import graph_io

# Load Model
saver = tf.train.import_meta_graph('./Model-Tensorflow/unet.meta', clear_devices=True)
graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
sess = tf.Session()
saver.restore(sess, "./Model-Tensorflow/unet")
print('Model Loaded!')
In [ ]:
# TEST
# Convert Model
output_node_names="y_pred"
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, input_graph_def, output_node_names.split(","))
print('Model Converted!')

# Save Frozen Model
graph_io.write_graph(output_graph_def, './', './Model-Tensorflow/unet-benchmark.pb', as_text=False)
sess.close()
print('File Saved!')
In [ ]:
# TEST
# Load Model
frozen_graph="./Model-Tensorflow/unet-benchmark.pb"
with tf.gfile.GFile(frozen_graph, "rb") as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read()) 
with tf.Graph().as_default() as graph:
      tf.import_graph_def(graph_def,
                          input_map=None,
                          return_elements=None,
                          name="")
y_pred = graph.get_tensor_by_name("y_pred:0")

# Start Session
sess= tf.Session(graph=graph)

# Predict in Loop
for i in range(400, 500):
    # Load Images from Validation
    original_image = x_valid[i]
    img = x_valid[i]
    label_image = y_valid[i]
    
    # Resize the Image to the desired Size, Preprocessing will be done exactly as done during Training
    original_image = np.reshape(original_image, [-1, 144, 144, 3])
    
    # Initialize
    images = []
    images.append(original_image)
    images = np.array(images, dtype=np.uint8)
    images = images.astype('float32')

    # NOTE: The input to the network is of shape [None image_size image_size num_channels]. 
    # Reshape Image
    x_batch = images.reshape(1, 144, 144, 3)

    # Feed the Images to the Input Placeholders
    x = graph.get_tensor_by_name("x:0")

    # Create the 'feed_dict', Required to be fed to Calculate 'y_pred' 
    feed_dict_testing = {x: x_batch}

    # Run Inference
    start_time = time.time()
    result=sess.run(y_pred, feed_dict=feed_dict_testing)
    test_mask = np.array(result).reshape(144,144)
    end_time = time.time()
    print('Computation Time:', end_time - start_time)
    binary_image = threshold_labels(test_mask, 100)

    # Resize Image
    label_image = resize(label_image, (200, 560), anti_aliasing=True)
    binary_image = resize(binary_image, (200, 560), anti_aliasing=True)

    # Plot
    fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize = (12,12))
    axis1.imshow(image[:,:,0])
    axis1.set_title('Input Image')
    axis2.imshow(label_image[:,:,0])
    axis2.set_title('Label Image')
    axis3.imshow(binary_image)
    axis3.set_title('Binary Prediction')